repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
fenderglass/ABruijn | flye/utils/sam_parser.py | 1 | 17011 | #(c) 2019 by Authors
#This file is a part of Flye program.
#Released under the BSD license (see LICENSE file)
"""
Provides multithreaded parser for SAM files
"""
from __future__ import absolute_import
from __future__ import division
import os
import re
import sys
from collections import namedtuple, defaultdict
import subprocess
import logging
import multiprocessing
import ctypes
import time
import gzip
import io
import random
#In Python2, everything is bytes (=str)
#In Python3, we are doing IO in bytes, but everywhere else strings = unicode
#_STR / _BYTES convert between the two worlds; both are identities on Python2.
if sys.version_info < (3, 0):
    from string import maketrans
    _STR = lambda x: x
    _BYTES = lambda x: x
else:
    maketrans = bytes.maketrans
    _STR = bytes.decode
    _BYTES = str.encode
from flye.six.moves import range
from flye.six import iteritems
import flye.utils.fasta_parser as fp
logger = logging.getLogger()
SAMTOOLS_BIN = "flye-samtools"
#One read-to-contig alignment record. qry_seq/trg_seq are the CIGAR-expanded,
#equal-length sequences with b"-" padding at indels; qry_sign is "+"/"-"
#(trg_sign is always "+" in this parser), coordinates are 0-based.
Alignment = namedtuple("Alignment", ["qry_id", "trg_id", "qry_start", "qry_end",
                                     "qry_sign", "qry_len", "trg_start",
                                     "trg_end", "trg_sign", "trg_len",
                                     "qry_seq", "trg_seq", "err_rate",
                                     "is_secondary"])
class AlignmentException(Exception):
    """Raised on malformed, missing, or unexpectedly ordered alignment input."""
    pass
class PafHit(object):
    """
    A single PAF alignment record: query/target ids, lengths and
    mapping coordinates (columns 0-3 and 5-8 of a PAF line).
    """
    __slots__ = ("query", "query_length", "query_start", "query_end",
                 "target", "target_length", "target_start", "target_end")

    def __init__(self, raw_hit):
        fields = raw_hit.split()
        self.query = fields[0]
        self.query_length, self.query_start, self.query_end = \
            map(int, fields[1:4])
        self.target = fields[5]
        self.target_length, self.target_start, self.target_end = \
            map(int, fields[6:9])

    def query_mapping_length(self):
        """Number of query bases spanned by the alignment."""
        return self.query_end - self.query_start + 1

    def target_mapping_length(self):
        """Number of target bases spanned by the alignment."""
        return self.target_end - self.target_start + 1

    def query_left_overhang(self):
        """Unaligned query prefix length."""
        return self.query_start

    def query_right_overhang(self):
        """Unaligned query suffix length."""
        return self.query_length - self.query_end + 1

    def target_left_overhang(self):
        """Unaligned target prefix length."""
        return self.target_start

    def target_right_overhang(self):
        """Unaligned target suffix length."""
        return self.target_length - self.target_end + 1
def read_paf(filename):
    """
    Streams out PAF alignments, yielding one PafHit per line of `filename`.
    The file is opened in binary mode and each line is decoded via _STR
    before parsing.
    """
    with open(filename, "rb") as f:
        for raw_hit in f:
            yield PafHit(_STR(raw_hit))
def read_paf_grouped(filename):
    """
    Yields chunks of alignments, one chunk per (query, target) pair.
    Assumes the PAF input is already sorted by query id; within each
    query, chunks are emitted in ascending target order.
    """
    current_query = None
    hits_by_target = defaultdict(list)
    for hit in read_paf(filename):
        if current_query is not None and hit.query != current_query:
            #query changed: flush everything accumulated for the previous one
            for target in sorted(hits_by_target):
                yield hits_by_target[target]
            hits_by_target = defaultdict(list)
        hits_by_target[hit.target].append(hit)
        current_query = hit.query
    #flush the trailing query's groups
    if hits_by_target:
        for target in sorted(hits_by_target):
            yield hits_by_target[target]
class SynchronizedSamReader(object):
    """
    Parses SAM file in multiple threads.

    A dedicated IO worker process (started in the constructor) reads the
    alignment file contig by contig and pushes each contig's raw lines into
    a shared queue; consumer processes pull and parse them via get_chunk()
    until is_eof() becomes True.
    """
    def __init__(self, sam_alignment, reference_fasta,
                 max_coverage=None, use_secondary=False):
        """
        sam_alignment: path to a .sam / .sam.gz / .bam file, sorted by
                       target (contig) id.
        reference_fasta: dict of contig header -> sequence.
        max_coverage: per-contig mean coverage cap used by get_chunk().
                      NOTE(review): get_chunk() compares against this value
                      without a None check, so the default of None would
                      raise TypeError there — confirm callers always pass
                      a number.
        use_secondary: if True, keep secondary alignments (FLAG 0x100).
        """
        #check that alignment exists
        if not os.path.exists(sam_alignment):
            raise AlignmentException("Can't open {0}".format(sam_alignment))
        #will not be changed during execution, each process has its own copy
        self.aln_path = sam_alignment
        #reference kept as bytes to match byte-level SAM parsing below
        self.ref_fasta = {_BYTES(h) : _BYTES(s)
                          for (h, s) in iteritems(reference_fasta)}
        self.change_strand = True   #NOTE(review): never read in this file
        self.max_coverage = max_coverage
        self.use_secondary = use_secondary
        #matches one CIGAR token: a length followed by an operation letter
        self.cigar_parser = re.compile(b"[0-9]+[MIDNSHP=X]")
        #will be shared between processes
        self.shared_manager = multiprocessing.Manager()
        self.shared_reader_queue = self.shared_manager.Queue()
        #number of contig chunks currently available in the queue
        self.shared_num_jobs = multiprocessing.Value(ctypes.c_int, 0)
        self.shared_lock = self.shared_manager.Lock()
        self.shared_eof = multiprocessing.Value(ctypes.c_bool, False)
        #specific to IO thread
        self.io_thread = None
        self.terminate_flag = False
        self.processed_contigs = set()   #contigs fully read (sortedness check)
        self.chunk_buffer = []           #lines of the contig being read now
        self.current_contig = None
        #start IO thread (actually a separate process)
        self.io_thread = \
            multiprocessing.Process(target=SynchronizedSamReader._io_thread_worker,
                                    args=(self,))
        self.io_thread.start()
        #print("Init IO thread")

    def close(self):
        """Asks the IO worker to stop and waits for it to exit."""
        #NOTE(review): io_thread is a separate *process*, so this attribute
        #change is not visible inside it; join() appears to rely on the
        #worker reaching EOF on its own — confirm.
        self.terminate_flag = True
        self.io_thread.join()
        #print("Close IO thread")

    def _read_file_chunk(self, aln_file):
        """
        Reads a chunk for a single contig. Assuming it is only
        run in a single (and same) thread, and synchronized outside.

        Returns (contig_id, lines) for the first fully-read contig, or
        (None, []) when the file is exhausted. Lines of the next contig
        encountered are stashed in self.chunk_buffer for the next call.
        """
        parsed_contig = None
        chunk_to_return = []
        for line in aln_file:
            if not line:
                break
            if _is_sam_header(line): continue
            #locate the 3rd tab: the contig name is SAM column 3 (RNAME)
            tab_1 = line.find(b"\t")
            tab_2 = line.find(b"\t", tab_1 + 1)
            tab_3 = line.find(b"\t", tab_2 + 1)
            if tab_2 == -1 or tab_3 == -1:
                #raise AlignmentException("Error reading SAM file")
                continue
            read_contig = line[tab_2 + 1 : tab_3]
            #seeing a contig again after it ended means unsorted input
            if read_contig in self.processed_contigs:
                raise AlignmentException("Alignment file is not sorted")
            if read_contig != self.current_contig:
                prev_contig = self.current_contig
                self.current_contig = read_contig
                if prev_contig is not None:
                    #previous contig is complete: hand its lines back and
                    #start buffering the new contig with this line
                    self.processed_contigs.add(prev_contig)
                    parsed_contig = prev_contig
                    chunk_to_return = self.chunk_buffer
                    self.chunk_buffer = [line]
                    break
                else:
                    self.chunk_buffer = [line]
            else:
                self.chunk_buffer.append(line)
        #hit end of file: flush whatever is buffered for the last contig
        if not parsed_contig and self.chunk_buffer:
            parsed_contig = self.current_contig
            chunk_to_return = self.chunk_buffer
            self.chunk_buffer = []
        return parsed_contig, chunk_to_return

    def _io_thread_worker(self):
        """
        This function reads the SAM file in a separate thread as needed.

        Keeps at most PRE_READ contig chunks queued ahead of the consumers.
        A chunk is pushed as: contig id, each raw line, then a None sentinel.
        """
        PRE_READ = 30
        sam_eof = False
        #pick the reader matching the file type
        if self.aln_path.endswith(".gz"):
            gz = gzip.open(self.aln_path, "rb")
            aln_file = io.BufferedReader(gz)
        elif self.aln_path.endswith(".bam"):
            aln_file = subprocess.Popen(SAMTOOLS_BIN + " view -@4 " + self.aln_path,
                                        shell=True, stdout=subprocess.PIPE).stdout
        else:
            aln_file = open(self.aln_path, "rb")
        while True:
            if self.terminate_flag:
                return
            #reached EOF and everything was read from the queue
            if sam_eof and self.shared_num_jobs.value == 0:
                self.shared_eof.value = True
                #print("IO thread: finished")
                return
            if not sam_eof and self.shared_num_jobs.value < PRE_READ:
                #with self.shared_ctx.lock:
                #print("IO thread: Q size: ", self.shared_ctx.reader_queue.qsize())
                ctg_id, chunk = self._read_file_chunk(aln_file)
                if ctg_id is not None:
                    with self.shared_lock:
                        self.shared_reader_queue.put(ctg_id)
                        for line in chunk:
                            self.shared_reader_queue.put(line)
                        #None sentinel terminates the chunk for the consumer
                        self.shared_reader_queue.put(None)
                        self.shared_num_jobs.value += 1
                else:
                    sam_eof = True
            time.sleep(0.01)

    def is_eof(self):
        """True once the file is fully read and all chunks were consumed."""
        return self.shared_eof.value

    def _parse_cigar(self, cigar_str, read_str, ctg_name, ctg_pos):
        """
        Expands a CIGAR string into gap-padded query/target subsequences.

        ctg_pos is the 1-based SAM POS; returned coordinates are 0-based.
        Returns (trg_start, trg_end, trg_len, trg_seq,
                 qry_start, qry_end, qry_len, qry_seq, err_rate), where
        qry_seq / trg_seq are equal-length bytes with b"-" at indels.
        """
        ctg_str = self.ref_fasta[ctg_name]
        trg_seq = []
        qry_seq = []
        trg_start = ctg_pos - 1
        trg_pos = ctg_pos - 1
        qry_start = 0
        qry_pos = 0
        #whether clips seen so far are on the left flank of the read
        left_hard = True
        left_soft = True
        hard_clipped_left = 0
        hard_clipped_right = 0
        soft_clipped_left = 0
        soft_clipped_right = 0
        for token in self.cigar_parser.findall(cigar_str):
            size, op = int(token[:-1]), token[-1:]
            if op == b"H":
                #hard clip: bases absent from SEQ, counted into qry coords
                if left_hard:
                    qry_start += size
                    hard_clipped_left += size
                else:
                    hard_clipped_right += size
            elif op == b"S":
                #soft clip: bases present in SEQ but not aligned
                qry_pos += size
                if left_soft:
                    soft_clipped_left += size
                else:
                    soft_clipped_right += size
            elif op == b"M":
                #match/mismatch: consume both sequences
                qry_seq.append(read_str[qry_pos : qry_pos + size].upper())
                trg_seq.append(ctg_str[trg_pos : trg_pos + size].upper())
                qry_pos += size
                trg_pos += size
            elif op == b"I":
                #insertion to the reference: gap in target
                qry_seq.append(read_str[qry_pos : qry_pos + size].upper())
                trg_seq.append(b"-" * size)
                qry_pos += size
            elif op == b"D":
                #deletion from the reference: gap in query
                qry_seq.append(b"-" * size)
                trg_seq.append(ctg_str[trg_pos : trg_pos + size].upper())
                trg_pos += size
            else:
                raise AlignmentException("Unsupported CIGAR operation: " + str(op))
            left_hard = False
            if op != b"H":
                left_soft = False
        trg_seq = b"".join(trg_seq)
        qry_seq = b"".join(qry_seq)
        #error rate = fraction of non-identical columns in the expanded alignment
        matches = 0
        for i in range(len(trg_seq)):
            if trg_seq[i] == qry_seq[i]:
                matches += 1
        err_rate = 1 - matches / len(trg_seq)
        trg_end = trg_pos
        qry_end = qry_pos + hard_clipped_left
        qry_len = qry_end + hard_clipped_right
        qry_start += soft_clipped_left
        qry_end -= soft_clipped_right
        return (trg_start, trg_end, len(ctg_str), trg_seq,
                qry_start, qry_end, qry_len, qry_seq, err_rate)

    def get_chunk(self):
        """
        Gets a chunk - safe to use from multiple processes in parallel.

        Blocks until a contig chunk is available, parses its SAM records
        and returns (contig_id, [Alignment, ...]) sorted by read id and
        descending aligned query span. Returns (None, []) at EOF.
        """
        #fetching data from the IO thread
        parsed_contig = None
        chunk_buffer = None
        while True:
            with self.shared_lock:
                if self.shared_eof.value:
                    return None, []
                if self.shared_num_jobs.value > 0:
                    #first queued item is the contig id, then raw lines
                    #until the None sentinel
                    parsed_contig = self.shared_reader_queue.get()
                    chunk_buffer = []
                    while True:
                        line = self.shared_reader_queue.get()
                        if line is not None:
                            chunk_buffer.append(line)
                        else:
                            break
                    self.shared_num_jobs.value -= 1
                    break
            time.sleep(0.01)
        ###
        #shuffle alignments so that they uniformly distributed. Use same seed for determinism
        random.Random(42).shuffle(chunk_buffer)
        sequence_length = 0
        alignments = []
        for line in chunk_buffer:
            tokens = line.strip().split()
            if len(tokens) < 11:
                #raise AlignmentException("Error reading SAM file")
                continue
            flags = int(tokens[1])
            is_unmapped = flags & 0x4
            is_secondary = flags & 0x100
            #is_supplementary = flags & 0x800 #allow supplementary
            #if is_unmapped or is_secondary: continue
            if is_unmapped: continue
            if is_secondary and not self.use_secondary: continue
            read_id = tokens[0]
            read_contig = tokens[2]
            cigar_str = tokens[5]
            read_str = tokens[9]
            ctg_pos = int(tokens[3])
            #NOTE(review): the SAM reverse-strand bit is 0x10; the mask 0x16
            #also matches bits 0x2/0x4 — looks like a hex/decimal slip,
            #confirm the intended mask
            is_reversed = flags & 0x16
            is_secondary = flags & 0x100
            if read_str == b"*":
                raise Exception("Error parsing SAM: record without read sequence")
            (trg_start, trg_end, trg_len, trg_seq,
             qry_start, qry_end, qry_len, qry_seq, err_rate) = \
                self._parse_cigar(cigar_str, read_str, read_contig, ctg_pos)
            #OVERHANG = cfg.vals["read_aln_overhang"]
            #if (float(qry_end - qry_start) / qry_len > self.min_aln_rate or
            #        trg_start < OVERHANG or trg_len - trg_end < OVERHANG):
            aln = Alignment(_STR(read_id), _STR(read_contig),
                            qry_start, qry_end, "-" if is_reversed else "+", qry_len,
                            trg_start, trg_end, "+", trg_len,
                            _STR(qry_seq), _STR(trg_seq),
                            err_rate, is_secondary)
            alignments.append(aln)
            sequence_length += qry_end - qry_start
            contig_length = len(self.ref_fasta[parsed_contig])
            #stop once mean coverage for this contig exceeds the cap
            #NOTE(review): raises TypeError when max_coverage is None (the
            #constructor default) — confirm callers always pass a number
            if sequence_length // contig_length > self.max_coverage:
                break
        #sort alignments by read id, longest aligned query span first
        alignments.sort(key=lambda a: (a.qry_id, -(a.qry_end - a.qry_start)))
        if parsed_contig is None:
            return None, []
        return _STR(parsed_contig), alignments
"""
def preprocess_sam(sam_file, work_dir):
#Proprocesses minimap2 output by adding SEQ
#to secondary alignments, removing
#unaligned reads and then sorting
#file by reference sequence id
expanded_sam = sam_file + "_expanded"
merged_file = sam_file + "_merged"
sorted_file = sam_file + "_sorted"
#puting SAM headers to the final postprocessed file first
with open(sam_file, "rb") as hdr_in, open(merged_file, "wb") as fout:
for line in hdr_in:
if not _is_sam_header(line):
break
fout.write(line)
#adding SEQ fields to secondary alignments
with open(sam_file, "rb") as fin, open(expanded_sam, "wb") as fout:
prev_id = None
prev_seq = None
primary_reversed = None
for line in fin:
if _is_sam_header(line):
continue
tokens = line.strip().split()
flags = int(tokens[1])
is_unmapped = flags & 0x4
is_secondary = flags & 0x100
is_supplementary = flags & 0x800
is_reversed = flags & 0x16
if is_unmapped:
continue
read_id, cigar_str, read_seq = tokens[0], tokens[5], tokens[9]
has_hard_clipped = b"H" in cigar_str
#Checking format assumptions
if has_hard_clipped:
if is_secondary:
raise Exception("Secondary alignment with hard-clipped bases")
if not is_supplementary:
raise Exception("Primary alignment with hard-clipped bases")
if not is_secondary and read_seq == b"*":
raise Exception("Missing SEQ for non-secondary alignment")
if read_seq == b"*":
if read_id != prev_id:
raise Exception("SAM file is not sorted by read names")
if is_reversed == primary_reversed:
tokens[9] = prev_seq
else:
tokens[9] = fp.reverse_complement_bytes(prev_seq)
#Assuming that the first read alignmnent in SAM is primary
elif prev_id != read_id:
if has_hard_clipped:
raise Exception("Hard clipped bases in the primamry read")
prev_id = read_id
prev_seq = read_seq
primary_reversed = is_reversed
fout.write(b"\t".join(tokens) + b"\n")
#don't need the original SAM anymore, cleaning up space
os.remove(sam_file)
#logger.debug("Sorting alignment file")
env = os.environ.copy()
env["LC_ALL"] = "C"
subprocess.check_call(["sort", "-k", "3,3", "-T", work_dir, expanded_sam],
stdout=open(sorted_file, "wb"), env=env)
#don't need the expanded file anymore
os.remove(expanded_sam)
#appending to the final file, that already contains headers
with open(sorted_file, "rb") as sort_in, open(merged_file, "ab") as fout:
for line in sort_in:
if not _is_sam_header(line):
fout.write(line)
os.remove(sorted_file)
os.rename(merged_file, sam_file)
"""
def _is_sam_header(line):
return line[:3] in [b"@PG", b"@HD", b"@SQ", b"@RG", b"@CO"]
| bsd-3-clause |
hongliuuuu/Results_Dis | finalMe/AMKLrbf.py | 1 | 6721 | from sklearn.kernel_approximation import (RBFSampler,Nystroem)
from sklearn.ensemble import RandomForestClassifier
import pandas
import numpy as np
import random
from sklearn.svm import SVC
from sklearn.metrics.pairwise import rbf_kernel,laplacian_kernel,chi2_kernel,linear_kernel,polynomial_kernel,cosine_similarity
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
def splitdata(X, Y, ratio, seed):
    """Split the data into train/test indices, preserving class proportions.

    X: feature matrix, only X.shape[0] is used.
    Y: label vector (castable to int).
    ratio: fraction of each class drawn into the training set.
    seed: seed for the deterministic per-class sampling.

    Returns (train_indices, test_indices): a Python list and a numpy array;
    together they partition range(n_samples).
    """
    n_samples = X.shape[0]
    y = Y.astype(int)
    #group sample indices by class label in a single pass; iterating the
    #sorted labels keeps the historical, deterministic sampling order
    by_class = {}
    for j in range(n_samples):
        by_class.setdefault(int(y[j]), []).append(j)
    train_indices = []
    for label in sorted(by_class):
        members = by_class[label]
        k = int(len(members) * ratio)
        #fresh seeded RNG per class => reproducible draws for a given seed
        train_indices += random.Random(seed).sample(members, k=k)
    #everything not drawn for training becomes the test set
    s = np.bincount(train_indices, minlength=n_samples)
    mask = s == 0
    test_indices = np.arange(n_samples)[mask]
    return train_indices, test_indices
def kn(X, Y):
    """Cubed shifted cosine kernel: (cos_sim(X, Y) + 1) ** 3."""
    dot_xy = np.dot(X, Y)
    norm_x = np.sqrt(np.dot(X, X))
    norm_y = np.sqrt(np.dot(Y, Y))
    if norm_x * norm_y == 0:
        #degenerate zero-norm vector; keep the original diagnostic print
        print(X, Y)
    return pow(dot_xy / (norm_x * norm_y) + 1, 3)
def similarity(X):
    """Pairwise kernel matrix over the rows of X using kn().

    Returns an (n_samples, n_samples) symmetric matrix; the diagonal is
    deliberately left at 0 (self-similarity is never computed), matching
    the original behavior.
    """
    n_samples = X.shape[0]
    dis = np.zeros((n_samples, n_samples))
    #np.zeros already yields a zero diagonal, so only fill i < j and mirror
    for i in range(n_samples):
        for j in range(i + 1, n_samples):
            dis[i][j] = dis[j][i] = kn(X[i], X[j])
    return dis
def Lsvm_patatune(train_x, train_y):
    """Grid-search the SVM cost C over a precomputed kernel matrix.

    train_x: square (n_train, n_train) precomputed kernel matrix.
    train_y: training labels.
    Returns the best C found by 5-fold cross-validation.
    """
    tuned_parameters = [
        {'kernel': ['precomputed'], 'C': [0.01, 0.1, 1, 10, 100, 1000]}]
    clf = GridSearchCV(SVC(C=1, probability=True), tuned_parameters, cv=5, n_jobs=1
                       )  # SVC(probability=True)#SVC(kernel="linear", probability=True)
    clf.fit(train_x, train_y)
    return clf.best_params_['C']
#---- Data loading -----------------------------------------------------------
#Feature matrix: the first CSV column is dropped (index/id column).
url = './MyData.csv'
dataframe = pandas.read_csv(url)#, header=None)
array = dataframe.values
X = array[:,1:]
#Labels come from a separate CSV; drop the leading column as well and
#flatten to a 1-D vector.
Y = pandas.read_csv('./MyDatalabel.csv')
Y = Y.values
Y = Y[:,1:]
Y = Y.transpose()
Y = np.ravel(Y)
n_samples = X.shape[0]
n_features = X.shape[1]
#Relabel class 4 as class 1 (merging the two labels).
for i in range(len(Y)):
    if Y[i] == 4:
        Y[i]=1
#X = min_max_scaler.fit_transform(X)
#X1_features = similarity(X[:, 0:2])
#X2_features = similarity(X[:, 2:21])
#X3_features = similarity(X[:, 21:])
"""
e1 = []
X1_features = polynomial_kernel(X[:, 0:2])+linear_kernel(X[:, 0:2])+rbf_kernel(X[:, 0:2])+laplacian_kernel(X[:, 0:2])
X2_features = linear_kernel(X[:, 2:21])+polynomial_kernel(X[:, 2:21])+rbf_kernel(X[:, 2:21])+laplacian_kernel(X[:, 2:21])
X3_features = linear_kernel(X[:, 21:])+polynomial_kernel(X[:, 21:])+rbf_kernel(X[:, 21:])+laplacian_kernel(X[:, 21:])
X_features = (X1_features + X2_features + X3_features)
for l in range(10):
train_indices, test_indices = splitdata(X=X, Y=Y, ratio=0.7, seed=1000 + l)
X_features1 = np.transpose(X_features)
X_features2 = X_features1[train_indices]
X_features3 = np.transpose(X_features2)
clf = SVC(kernel='precomputed')
clf.fit(X_features3[train_indices], Y[train_indices])
e1.append(clf.score(X_features3[test_indices], Y[test_indices]))
s = "combination of %d_%d_%d" % (l, l, l)
if np.mean(e1) > big:
big = np.mean(e1)
print(np.mean(e1))
print(s)
testfile.write(s + ":%f" % (np.mean(e1)) + '\n')
"""
min_max_scaler = preprocessing.MinMaxScaler()
svm_X = min_max_scaler.fit_transform(X)  #NOTE(review): svm_X is never used below
min_max_scaler = preprocessing.MinMaxScaler()
#X_new keeps the unscaled features for the custom similarity() kernel;
#X is min-max scaled for the sklearn kernels.
X_new = X
X = min_max_scaler.fit_transform(X)
#Exhaustive search over kernel choices (5 options per feature group) for
#three train ratios; per-combination results go to a per-ratio text file.
for r in range(3):
    if r==0:
        R = 0.3
    elif r==1:
        R = 0.5
    else:
        R = 0.7
    testfile = open("AMKLcombinationTest%f.txt" % R, 'w')
    big = 0
    mm = ""
    err = 0
    for i in range(5):
        for j in range(5):
            for k in range(5):
                #Kernel for feature group 1 (columns 0:2):
                #0=polynomial, 1=linear, 2=rbf, 3=laplacian, 4=custom similarity()
                if(i==0):
                    X1_features = polynomial_kernel(X[:, 0:2])
                elif(i==1):
                    X1_features = linear_kernel(X[:, 0:2])
                elif(i==2):
                    X1_features = rbf_kernel(X[:, 0:2])
                elif(i==3):
                    X1_features = laplacian_kernel(X[:, 0:2])
                elif (i == 4):
                    X1_features = similarity(X_new[:, 0:2])
                #Kernel for feature group 2 (columns 2:21), same encoding.
                if (j == 0):
                    X2_features = polynomial_kernel(X[:, 2:21])
                elif (j == 1):
                    X2_features = linear_kernel(X[:, 2:21])
                elif (j == 2):
                    X2_features = rbf_kernel(X[:, 2:21])
                elif (j == 3):
                    X2_features = laplacian_kernel(X[:, 2:21])
                elif (j == 4):
                    X2_features = similarity(X_new[:, 2:21])
                #Kernel for feature group 3 (columns 21:), same encoding.
                if (k == 0):
                    X3_features = polynomial_kernel(X[:, 21:])
                elif (k == 1):
                    X3_features = linear_kernel(X[:, 21:])
                elif (k == 2):
                    X3_features = rbf_kernel(X[:, 21:])
                elif (k == 3):
                    X3_features = laplacian_kernel(X[:, 21:])
                elif (k == 4):
                    X3_features = similarity(X_new[:, 21:])
                #Multiple-kernel combination: unweighted sum of the three.
                X_features = (X1_features + X2_features + X3_features)
                e1 = []
                #10 random stratified splits per combination.
                for l in range(10):
                    train_indices, test_indices = splitdata(X=X, Y=Y, ratio=R, seed=1000 + l)
                    #Restrict the precomputed kernel matrix to train columns.
                    X_features1 = np.transpose(X_features)
                    X_features2 = X_features1[train_indices]
                    X_features3 = np.transpose(X_features2)
                    c = Lsvm_patatune(train_x=X_features3[train_indices], train_y=Y[train_indices])
                    print(c)
                    clf = SVC(C=c,kernel='precomputed')
                    clf.fit(X_features3[train_indices], Y[train_indices])
                    e1.append(clf.score(X_features3[test_indices], Y[test_indices]))
                s = "combination of %d_%d_%d"%(i,j,k)
                #Track the best mean accuracy seen so far for this ratio.
                if np.mean(e1)>big:
                    big = np.mean(e1)
                    print(np.mean(e1))
                    print(s)
                    mm=s
                    err = big
                    std = np.std(e1)
                testfile.write(s + ":%f \p %f" % (np.mean(e1), np.std(e1)) + '\n')
    #NOTE(review): if no combination ever exceeds big == 0, `std` is unbound
    #on the next line — confirm this cannot happen in practice.
    testfile.write("best peformance is" + mm + ":%f \p %f" % (err, std) + '\n')
    testfile.close()
| apache-2.0 |
stevekuznetsov/ansible | lib/ansible/modules/cloud/ovirt/ovirt_clusters.py | 8 | 25741 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
#Metadata consumed by Ansible's module documentation/support tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_clusters
short_description: Module to manage clusters in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage clusters in oVirt"
options:
name:
description:
- "Name of the cluster to manage."
required: true
state:
description:
- "Should the cluster be present or absent"
choices: ['present', 'absent']
default: present
datacenter:
description:
- "Datacenter name where cluster reside."
description:
description:
- "Description of the cluster."
comment:
description:
- "Comment of the cluster."
network:
description:
- "Management network of cluster to access cluster hosts."
ballooning:
description:
- "If I(True) enable memory balloon optimization. Memory balloon is used to
re-distribute / reclaim the host memory based on VM needs
in a dynamic way."
virt:
description:
- "If I(True), hosts in this cluster will be used to run virtual machines."
gluster:
description:
- "If I(True), hosts in this cluster will be used as Gluster Storage
server nodes, and not for running virtual machines."
- "By default the cluster is created for virtual machine hosts."
threads_as_cores:
description:
- "If I(True) the exposed host threads would be treated as cores
which can be utilized by virtual machines."
ksm:
description:
- "If I(True) MoM enables to run Kernel Same-page Merging I(KSM) when
necessary and when it can yield a memory saving benefit that
outweighs its CPU cost."
ksm_numa:
description:
- "If I(True) enables KSM C(ksm) for best performance inside NUMA nodes."
ha_reservation:
description:
- "If I(True) enable the oVirt to monitor cluster capacity for highly
available virtual machines."
trusted_service:
description:
- "If I(True) enable integration with an OpenAttestation server."
vm_reason:
description:
- "If I(True) enable an optional reason field when a virtual machine
is shut down from the Manager, allowing the administrator to
provide an explanation for the maintenance."
host_reason:
description:
- "If I(True) enable an optional reason field when a host is placed
into maintenance mode from the Manager, allowing the administrator
to provide an explanation for the maintenance."
memory_policy:
description:
- "I(disabled) - Disables memory page sharing."
- "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host."
- "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host."
choices: ['disabled', 'server', 'desktop']
rng_sources:
description:
- "List that specify the random number generator devices that all hosts in the cluster will use."
- "Supported generators are: I(hwrng) and I(random)."
spice_proxy:
description:
- "The proxy by which the SPICE client will connect to virtual machines."
- "The address must be in the following format: I(protocol://[host]:[port])"
fence_enabled:
description:
- "If I(True) enables fencing on the cluster."
- "Fencing is enabled by default."
fence_skip_if_sd_active:
description:
- "If I(True) any hosts in the cluster that are Non Responsive
and still connected to storage will not be fenced."
fence_skip_if_connectivity_broken:
description:
- "If I(True) fencing will be temporarily disabled if the percentage
of hosts in the cluster that are experiencing connectivity issues
is greater than or equal to the defined threshold."
- "The threshold can be specified by C(fence_connectivity_threshold)."
fence_connectivity_threshold:
description:
- "The threshold used by C(fence_skip_if_connectivity_broken)."
resilience_policy:
description:
- "The resilience policy defines how the virtual machines are prioritized in the migration."
- "Following values are supported:"
- "C(do_not_migrate) - Prevents virtual machines from being migrated. "
- "C(migrate) - Migrates all virtual machines in order of their defined priority."
- "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts."
choices: ['do_not_migrate', 'migrate', 'migrate_highly_available']
migration_bandwidth:
description:
- "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host."
- "Following bandwidth options are supported:"
- "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS."
- "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host."
- "C(custom) - Defined by user (in Mbps)."
choices: ['auto', 'hypervisor_default', 'custom']
migration_bandwidth_limit:
description:
- "Set the I(custom) migration bandwidth limit."
- "This parameter is used only when C(migration_bandwidth) is I(custom)."
migration_auto_converge:
description:
- "If I(True) auto-convergence is used during live migration of virtual machines."
- "Used only when C(migration_policy) is set to I(legacy)."
- "Following options are supported:"
- "C(true) - Override the global setting to I(true)."
- "C(false) - Override the global setting to I(false)."
- "C(inherit) - Use value which is set globally."
choices: ['true', 'false', 'inherit']
migration_compressed:
description:
- "If I(True) compression is used during live migration of the virtual machine."
- "Used only when C(migration_policy) is set to I(legacy)."
- "Following options are supported:"
- "C(true) - Override the global setting to I(true)."
- "C(false) - Override the global setting to I(false)."
- "C(inherit) - Use value which is set globally."
choices: ['true', 'false', 'inherit']
migration_policy:
description:
- "A migration policy defines the conditions for live migrating
virtual machines in the event of host failure."
- "Following policies are supported:"
- "C(legacy) - Legacy behavior of 3.6 version."
- "C(minimal_downtime) - Virtual machines should not experience any significant downtime."
- "C(suspend_workload) - Virtual machines may experience a more significant downtime."
choices: ['legacy', 'minimal_downtime', 'suspend_workload']
serial_policy:
description:
- "Specify a serial number policy for the virtual machines in the cluster."
- "Following options are supported:"
- "C(vm) - Sets the virtual machine's UUID as its serial number."
- "C(host) - Sets the host's UUID as the virtual machine's serial number."
- "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)."
serial_policy_value:
description:
- "Allows you to specify a custom serial number."
- "This parameter is used only when C(serial_policy) is I(custom)."
scheduling_policy:
description:
- "Name of the scheduling policy to be used for cluster."
cpu_arch:
description:
- "CPU architecture of cluster."
choices: ['x86_64', 'ppc64', 'undefined']
cpu_type:
description:
- "CPU codename. For example I(Intel SandyBridge Family)."
switch_type:
description:
- "Type of switch to be used by all networks in given cluster.
Either I(legacy) which is using linux brigde or I(ovs) using
Open vSwitch."
choices: ['legacy', 'ovs']
compatibility_version:
description:
- "The compatibility version of the cluster. All hosts in this
cluster must support at least this compatibility version."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create cluster
- ovirt_clusters:
datacenter: mydatacenter
name: mycluster
cpu_type: Intel SandyBridge Family
description: mycluster
compatibility_version: 4.0
# Create virt service cluster:
- ovirt_clusters:
datacenter: mydatacenter
name: mycluster
cpu_type: Intel Nehalem Family
description: mycluster
switch_type: legacy
compatibility_version: 4.0
ballooning: true
gluster: false
threads_as_cores: true
ha_reservation: true
trusted_service: false
host_reason: false
vm_reason: true
ksm_numa: true
memory_policy: server
rng_sources:
- hwrng
- random
# Remove cluster
- ovirt_clusters:
state: absent
name: mycluster
'''
RETURN = '''
id:
description: ID of the cluster which is managed
returned: On success if cluster is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
cluster:
description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster."
returned: On success if cluster is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class ClustersModule(BaseModule):
def __get_major(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.major
return int(full_version.split('.')[0])
def __get_minor(self, full_version):
if full_version is None:
return None
if isinstance(full_version, otypes.Version):
return full_version.minor
return int(full_version.split('.')[1])
def param(self, name, default=None):
return self._module.params.get(name, default)
def _get_memory_policy(self):
memory_policy = self.param('memory_policy')
if memory_policy == 'desktop':
return 200
elif memory_policy == 'server':
return 150
elif memory_policy == 'disabled':
return 100
def _get_policy_id(self):
# These are hardcoded IDs, once there is API, please fix this.
# legacy - 00000000-0000-0000-0000-000000000000
# minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b
# suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c
migration_policy = self.param('migration_policy')
if migration_policy == 'legacy':
return '00000000-0000-0000-0000-000000000000'
elif migration_policy == 'minimal_downtime':
return '80554327-0569-496b-bdeb-fcbbf52b827b'
elif migration_policy == 'suspend_workload':
return '80554327-0569-496b-bdeb-fcbbf52b827c'
def _get_sched_policy(self):
sched_policy = None
if self.param('scheduling_policy'):
sched_policies_service = self._connection.system_service().scheduling_policies_service()
sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy'))
if not sched_policy:
raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy'))
return sched_policy
def build_entity(self):
sched_policy = self._get_sched_policy()
return otypes.Cluster(
name=self.param('name'),
comment=self.param('comment'),
description=self.param('description'),
ballooning_enabled=self.param('ballooning'),
gluster_service=self.param('gluster'),
virt_service=self.param('virt'),
threads_as_cores=self.param('threads_as_cores'),
ha_reservation=self.param('ha_reservation'),
trusted_service=self.param('trusted_service'),
optional_reason=self.param('vm_reason'),
maintenance_reason_required=self.param('host_reason'),
scheduling_policy=otypes.SchedulingPolicy(
id=sched_policy.id,
) if sched_policy else None,
serial_number=otypes.SerialNumber(
policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
value=self.param('serial_policy_value'),
) if (
self.param('serial_policy') is not None or
self.param('serial_policy_value') is not None
) else None,
migration=otypes.MigrationOptions(
auto_converge=otypes.InheritableBoolean(
self.param('migration_auto_converge'),
) if self.param('migration_auto_converge') else None,
bandwidth=otypes.MigrationBandwidth(
assignment_method=otypes.MigrationBandwidthAssignmentMethod(
self.param('migration_bandwidth'),
) if self.param('migration_bandwidth') else None,
custom_value=self.param('migration_bandwidth_limit'),
) if (
self.param('migration_bandwidth') or
self.param('migration_bandwidth_limit')
) else None,
compressed=otypes.InheritableBoolean(
self.param('migration_compressed'),
) if self.param('migration_compressed') else None,
policy=otypes.MigrationPolicy(
id=self._get_policy_id()
) if self.param('migration_policy') else None,
) if (
self.param('migration_bandwidth') is not None or
self.param('migration_bandwidth_limit') is not None or
self.param('migration_auto_converge') is not None or
self.param('migration_compressed') is not None or
self.param('migration_policy') is not None
) else None,
error_handling=otypes.ErrorHandling(
on_error=otypes.MigrateOnError(
self.param('resilience_policy')
),
) if self.param('resilience_policy') else None,
fencing_policy=otypes.FencingPolicy(
enabled=(
self.param('fence_enabled') or
self.param('fence_skip_if_connectivity_broken') or
self.param('fence_skip_if_sd_active')
),
skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken(
enabled=self.param('fence_skip_if_connectivity_broken'),
threshold=self.param('fence_connectivity_threshold'),
) if (
self.param('fence_skip_if_connectivity_broken') is not None or
self.param('fence_connectivity_threshold') is not None
) else None,
skip_if_sd_active=otypes.SkipIfSdActive(
enabled=self.param('fence_skip_if_sd_active'),
) if self.param('fence_skip_if_sd_active') else None,
) if (
self.param('fence_enabled') is not None or
self.param('fence_skip_if_sd_active') is not None or
self.param('fence_skip_if_connectivity_broken') is not None or
self.param('fence_connectivity_threshold') is not None
) else None,
display=otypes.Display(
proxy=self.param('spice_proxy'),
) if self.param('spice_proxy') else None,
required_rng_sources=[
otypes.RngSource(rng) for rng in self.param('rng_sources')
] if self.param('rng_sources') else None,
memory_policy=otypes.MemoryPolicy(
over_commit=otypes.MemoryOverCommit(
percent=self._get_memory_policy(),
),
) if self.param('memory_policy') else None,
ksm=otypes.Ksm(
enabled=self.param('ksm') or self.param('ksm_numa'),
merge_across_nodes=not self.param('ksm_numa'),
) if (
self.param('ksm_numa') is not None or
self.param('ksm') is not None
) else None,
data_center=otypes.DataCenter(
name=self.param('datacenter'),
) if self.param('datacenter') else None,
management_network=otypes.Network(
name=self.param('network'),
) if self.param('network') else None,
cpu=otypes.Cpu(
architecture=self.param('cpu_arch'),
type=self.param('cpu_type'),
) if (
self.param('cpu_arch') or self.param('cpu_type')
) else None,
version=otypes.Version(
major=self.__get_major(self.param('compatibility_version')),
minor=self.__get_minor(self.param('compatibility_version')),
) if self.param('compatibility_version') else None,
switch_type=otypes.SwitchType(
self.param('switch_type')
) if self.param('switch_type') else None,
)
def update_check(self, entity):
    """Return True when the existing cluster *entity* already matches every
    supplied module parameter (i.e. no update API call is needed).

    The oVirt ``equal()`` helper treats a None/unset parameter as a match,
    so only parameters the user explicitly provided are compared.
    """
    sched_policy = self._get_sched_policy()
    # Older clusters may lack a migration policy; don't raise on missing attr.
    migration_policy = getattr(entity.migration, 'policy', None)
    return (
        equal(self.param('comment'), entity.comment) and
        equal(self.param('description'), entity.description) and
        equal(self.param('switch_type'), str(entity.switch_type)) and
        equal(self.param('cpu_arch'), str(entity.cpu.architecture)) and
        equal(self.param('cpu_type'), entity.cpu.type) and
        equal(self.param('ballooning'), entity.ballooning_enabled) and
        equal(self.param('gluster'), entity.gluster_service) and
        equal(self.param('virt'), entity.virt_service) and
        equal(self.param('threads_as_cores'), entity.threads_as_cores) and
        # NOTE(review): 'ksm_numa'/'ksm' are derived from both ksm.enabled and
        # merge_across_nodes (build sets merge_across_nodes=not ksm_numa) —
        # confirm this cross-coupling is intended.
        equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes and entity.ksm.enabled) and
        equal(self.param('ksm'), entity.ksm.merge_across_nodes and entity.ksm.enabled) and
        equal(self.param('ha_reservation'), entity.ha_reservation) and
        equal(self.param('trusted_service'), entity.trusted_service) and
        equal(self.param('host_reason'), entity.maintenance_reason_required) and
        equal(self.param('vm_reason'), entity.optional_reason) and
        equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and
        equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and
        equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and
        equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and
        equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and
        equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and
        equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and
        equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and
        equal(self.param('migration_compressed'), str(entity.migration.compressed)) and
        equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
        equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
        equal(self.param('scheduling_policy'), getattr(sched_policy, 'name', None)) and
        equal(self._get_policy_id(), getattr(migration_policy, 'id', None)) and
        equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and
        equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and
        equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and
        # The bandwidth limit is only meaningful when bandwidth mode is 'custom'.
        equal(
            self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None,
            entity.migration.bandwidth.custom_value
        ) and
        # Compare RNG sources order-insensitively.
        equal(
            sorted(self.param('rng_sources')) if self.param('rng_sources') else None,
            sorted([
                str(source) for source in entity.required_rng_sources
            ])
        )
    )
def main():
    """Ansible entry point: create, update or remove an oVirt cluster.

    Builds the module argument spec, dispatches on ``state`` and always
    closes the SDK connection (when one was actually opened) on exit.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        ballooning=dict(default=None, type='bool', aliases=['balloon']),
        gluster=dict(default=None, type='bool'),
        virt=dict(default=None, type='bool'),
        threads_as_cores=dict(default=None, type='bool'),
        ksm_numa=dict(default=None, type='bool'),
        ksm=dict(default=None, type='bool'),
        ha_reservation=dict(default=None, type='bool'),
        trusted_service=dict(default=None, type='bool'),
        vm_reason=dict(default=None, type='bool'),
        host_reason=dict(default=None, type='bool'),
        memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop']),
        rng_sources=dict(default=None, type='list'),
        spice_proxy=dict(default=None),
        fence_enabled=dict(default=None, type='bool'),
        fence_skip_if_sd_active=dict(default=None, type='bool'),
        fence_skip_if_connectivity_broken=dict(default=None, type='bool'),
        fence_connectivity_threshold=dict(default=None, type='int'),
        resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']),
        migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']),
        migration_bandwidth_limit=dict(default=None, type='int'),
        migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']),
        migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']),
        migration_policy=dict(default=None, choices=['legacy', 'minimal_downtime', 'suspend_workload']),
        serial_policy=dict(default=None, choices=['vm', 'host', 'custom']),
        serial_policy_value=dict(default=None),
        scheduling_policy=dict(default=None),
        datacenter=dict(default=None),
        description=dict(default=None),
        comment=dict(default=None),
        network=dict(default=None),
        cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']),
        cpu_type=dict(default=None),
        switch_type=dict(default=None, choices=['legacy', 'ovs']),
        compatibility_version=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # Initialized up front so the finally block never hits an unbound name
    # when create_connection() (or the auth pop) itself raises.
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        clusters_module = ClustersModule(
            connection=connection,
            module=module,
            service=clusters_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = clusters_module.create()
        elif state == 'absent':
            ret = clusters_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out / close when a connection was actually established;
        # the original referenced 'connection' unconditionally and raised a
        # NameError that masked the real failure.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
Rediker-Software/litle-sdk-for-python | litleSdkPythonTest/functional/TestUtf8.py | 1 | 2885 | # coding=utf-8
#Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
import re
import sys
import os
# Refuse to run unless stdout/stderr are known to be UTF-8: the tests in this
# module print Japanese text and would die with UnicodeEncodeError otherwise.
if not (("PYTHONIOENCODING" in os.environ)
        and
        re.search("^utf-?8$", os.environ["PYTHONIOENCODING"], re.I)):
    sys.stderr.write(sys.argv[0] + ": Please set your PYTHONIOENCODING envariable to utf8\n")
    sys.exit(1)

import unicodedata
# Compare parsed version tuples, not strings: the original lexicographic
# comparison ("10.0.0" < "6.0.0") would mis-warn on modern UCD versions.
if tuple(int(part) for part in unicodedata.unidata_version.split(".")) < (6, 0, 0):
    print("WARNING: Your old UCD is out of date, expected at least 6.0.0 but got",
          unicodedata.unidata_version)

# Narrow builds (sys.maxunicode == 0xFFFF) cannot represent astral characters.
wide_enough = (sys.maxunicode >= 0x10FFFF)
if not wide_enough:
    print("WARNING: Narrow build detected, your Python lacks full Unicode support!!")
class TestUtf8(unittest.TestCase):
    """Functional test: an authorization carrying UTF-8 (Japanese) billing
    data must round-trip through the Litle online API and be approved
    (response code "000")."""

    def testJapaneseCharacters(self):
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'

        card = litleXmlFields.cardType()
        card.number = "4100000000000001"
        card.expDate = "1210"
        card.type = "VI"
        card.cardValidationNum = "123"

        billingAddress = litleXmlFields.contact()
        # The non-ASCII payload under test (katakana, with a leading space).
        billingAddress.addressLine1 = u' チャプター'
        billingAddress.city = "Tokyo"

        authorization.card = card
        authorization.billToAddress = billingAddress

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        # assertEquals is a long-deprecated alias; use assertEqual.
        self.assertEqual("000", response.response)
def suite():
    """Build and return the test suite for this module.

    The original assigned an empty ``TestSuite()`` first and immediately
    overwrote it; the dead assignment has been removed.
    """
    return unittest.TestLoader().loadTestsFromTestCase(TestUtf8)

if __name__ == '__main__':
    unittest.main()
vshtanko/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest

iris = load_iris()
X, y = iris.data, iris.target

# The raw iris features are higher-dimensional than needed: reduce with PCA,
# and independently keep the single best original feature.
dim_reducer = PCA(n_components=2)
kbest = SelectKBest(k=1)

# Stack both extractors side-by-side so the pipeline sees one feature matrix.
union = FeatureUnion([("pca", dim_reducer), ("univ_select", kbest)])

# Demonstrate the union on its own before wiring it into the pipeline.
transformed = union.fit(X, y).transform(X)

clf = SVC(kernel="linear")

# Full pipeline: feature stacking followed by a linear SVM, so the grid
# search can tune extraction and classification jointly.
pipe = Pipeline([("features", union), ("svm", clf)])

grid_params = dict(features__pca__n_components=[1, 2, 3],
                   features__univ_select__k=[1, 2],
                   svm__C=[0.1, 1, 10])

search = GridSearchCV(pipe, param_grid=grid_params, verbose=10)
search.fit(X, y)
print(search.best_estimator_)
| bsd-3-clause |
pyvec/cz.pycon.org-2016 | pyconcz_2016/proposals/pyconcz2016_config.py | 1 | 1247 | from datetime import datetime
from django.utils.timezone import get_current_timezone
from pyconcz_2016.proposals.models import Talk, Workshop, FinancialAid
tz = get_current_timezone()
class TalksConfig:
    """CFP configuration for talk proposals: model, display labels,
    templates and the submission window."""
    model = Talk
    key = 'talks'
    title = 'Talks'
    cfp_title = 'Submit your talk'
    template_about = 'proposals/talks_about.html'
    # Submission window in the project's local timezone.
    # NOTE(review): passing a pytz timezone via tzinfo= can yield LMT offsets;
    # tz.localize() is the usual fix — confirm the timezone implementation.
    date_start = datetime(year=2016, month=8, day=1, hour=12, minute=0, tzinfo=tz)
    date_end = datetime(year=2016, month=9, day=15, hour=23, minute=59, second=59, tzinfo=tz)
class WorkshopsConfig:
    """CFP configuration for workshop proposals: model, display labels,
    templates and the submission window."""
    model = Workshop
    key = 'workshops'
    title = 'Workshops'
    cfp_title = 'Submit your workshop'
    template_about = 'proposals/workshops_about.html'
    # Submission window in the project's local timezone.
    date_start = datetime(year=2016, month=8, day=1, hour=12, minute=0, tzinfo=tz)
    date_end = datetime(year=2016, month=10, day=5, hour=12, minute=0, tzinfo=tz)
class FinancialAidConfig:
    """Configuration for financial-aid applications: model, display labels,
    templates and the application window."""
    model = FinancialAid
    key = 'financial-aid'
    title = 'Financial Aid'
    cfp_title = 'Financial Aid Programme'
    template_about = 'proposals/financial_aid_about.html'
    # Application window in the project's local timezone.
    date_start = datetime(year=2016, month=9, day=8, hour=12, minute=0, tzinfo=tz)
    date_end = datetime(year=2016, month=10, day=8, hour=12, minute=0, tzinfo=tz)
| mit |
v1bri/gnuradio | gr-blocks/python/blocks/qa_min.py | 46 | 5641 | #!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import math
class test_min(gr_unittest.TestCase):
    """QA tests for blocks.min_ff / blocks.min_ss.

    Only test_001 is live; the remaining methods are deliberately disabled
    via the 'stest_' prefix but kept as templates for multi-input and
    vectorized cases. The disabled stest_004/stest_s004 originally built
    'expected_data' but asserted on an undefined 'expected_result' — that
    NameError is fixed here so the tests work if re-enabled.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        # min over one float vector, emitted as a single scalar.
        src_data = (0, 0.2, -0.25, 0, 12, 0)
        expected_result = (float(min(src_data)),)

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.min_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_002(self):
        # Disabled: min over an all-negative float vector.
        src_data = (-100, -99, -98, -97, -96, -1)
        expected_result = (float(min(src_data)),)

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.min_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_003(self):
        # Disabled: element-wise min of two float streams.
        src_data0 = (0, 2, -3, 0, 12, 0)
        src_data1 = (1, 1, 1, 1, 1, 1)

        expected_result = [float(min(x, y)) for x, y in zip(src_data0, src_data1)]

        src0 = blocks.vector_source_f(src_data0)
        src1 = blocks.vector_source_f(src_data1)
        op = blocks.min_ff(1)
        dst = blocks.vector_sink_f()

        self.tb.connect(src0, (op, 0))
        self.tb.connect(src1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_004(self):
        # Disabled: element-wise min of two streams, then min over dim-2 vectors.
        dim = 2
        src_data0 = (0, 2, -3, 0, 12, 0)
        src_data1 = (1, 1, 1, 1, 1, 1)

        # Fixed: was appended to 'expected_data' while asserting on
        # 'expected_result', which raised NameError when enabled.
        expected_result = []
        tmp = [float(min(x, y)) for x, y in zip(src_data0, src_data1)]
        for i in xrange(len(tmp) / dim):
            expected_result.append(float(min(tmp[i * dim:(i + 1) * dim])))

        src0 = blocks.vector_source_f(src_data0)
        s2v0 = blocks.stream_to_vector(gr.sizeof_float, dim)
        src1 = blocks.vector_source_f(src_data1)
        s2v1 = blocks.stream_to_vector(gr.sizeof_float, dim)
        op = blocks.min_ff(dim)
        dst = blocks.vector_sink_f()

        self.tb.connect(src0, s2v0, (op, 0))
        self.tb.connect(src1, s2v1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s001(self):
        # Disabled: short-int variant of test_001.
        src_data = (0, 2, -3, 0, 12, 0)
        expected_result = (min(src_data),)

        src = blocks.vector_source_s(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
        op = blocks.min_ss(len(src_data))
        dst = blocks.vector_sink_s()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s002(self):
        # Disabled: short-int variant with all-negative input.
        src_data = (-100, -99, -98, -97, -96, -1)
        expected_result = (min(src_data),)

        src = blocks.vector_source_s(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
        op = blocks.min_ss(len(src_data))
        dst = blocks.vector_sink_s()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s003(self):
        # Disabled: element-wise min of two short streams.
        src_data0 = (0, 2, -3, 0, 12, 0)
        src_data1 = (1, 1, 1, 1, 1, 1)

        expected_result = [min(x, y) for x, y in zip(src_data0, src_data1)]

        src0 = blocks.vector_source_s(src_data0)
        src1 = blocks.vector_source_s(src_data1)
        op = blocks.min_ss(1)
        dst = blocks.vector_sink_s()

        self.tb.connect(src0, (op, 0))
        self.tb.connect(src1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s004(self):
        # Disabled: short-int variant of stest_004.
        dim = 2
        src_data0 = (0, 2, -3, 0, 12, 0)
        src_data1 = (1, 1, 1, 1, 1, 1)

        # Fixed: same expected_data/expected_result mismatch as stest_004.
        expected_result = []
        tmp = [min(x, y) for x, y in zip(src_data0, src_data1)]
        for i in xrange(len(tmp) / dim):
            expected_result.append(min(tmp[i * dim:(i + 1) * dim]))

        src0 = blocks.vector_source_s(src_data0)
        s2v0 = blocks.stream_to_vector(gr.sizeof_short, dim)
        src1 = blocks.vector_source_s(src_data1)
        s2v1 = blocks.stream_to_vector(gr.sizeof_short, dim)
        op = blocks.min_ss(dim)
        dst = blocks.vector_sink_s()

        self.tb.connect(src0, s2v0, (op, 0))
        self.tb.connect(src1, s2v1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)
# Run the QA suite through GNU Radio's test harness, writing XML results.
if __name__ == '__main__':
    gr_unittest.run(test_min, "test_min.xml")
| gpl-3.0 |
libvirt/autotest | cli/label.py | 5 | 7947 | #
# Copyright 2008 Google Inc. All Rights Reserved.
"""
The label module contains the objects and methods used to
manage labels in Autotest.
The valid actions are:
add: adds label(s), or hosts to an LABEL
remove: deletes label(s), or hosts from an LABEL
list: lists label(s)
The common options are:
--blist / -B: file containing a list of LABELs
See topic_common.py for a High Level Design and Algorithm.
"""
import os, sys
from autotest_lib.cli import topic_common, action_common
class label(topic_common.atest):
    """Label class
    atest label [create|delete|list|add|remove] <options>"""
    usage_action = '[create|delete|list|add|remove]'
    topic = msg_topic = 'label'
    msg_items = '<labels>'

    def __init__(self):
        """Add to the parser the options common to all the
        label actions"""
        super(label, self).__init__()
        # -B/--blist: a file listing one label per line, shared by every
        # label subcommand.
        self.parser.add_option('-B', '--blist',
                               help='File listing the labels',
                               type='string',
                               default=None,
                               metavar='LABEL_FLIST')

        # Labels are collected from --blist and/or leftover positional
        # arguments into self.labels by the atest parsing machinery.
        self.topic_parse_info = topic_common.item_parse_info(
            attribute_name='labels',
            filename_option='blist',
            use_leftover=True)

    def get_items(self):
        """Return the labels gathered during argument parsing."""
        return self.labels
class label_help(label):
    """Just here to get the atest logic working.
    Usage is set by its parent"""
    # Intentionally empty: exists only so 'atest label help' resolves.
    pass
class label_list(action_common.atest_list, label):
    """atest label list [--platform] [--all] [--atomicgroup]
    [--valid-only] [--machine <machine>]
    [--blist <file>] [<labels>]"""

    def __init__(self):
        super(label_list, self).__init__()
        self.parser.add_option('-t', '--platform-only',
                               help='Display only platform labels',
                               action='store_true')

        self.parser.add_option('-d', '--valid-only',
                               help='Display only valid labels',
                               action='store_true')

        self.parser.add_option('-a', '--all',
                               help=('Display both normal & '
                                     'platform labels'),
                               action='store_true')

        self.parser.add_option('--atomicgroup',
                               help=('Display only atomic group labels '
                                     'along with the atomic group name.'),
                               action='store_true')

        self.parser.add_option('-m', '--machine',
                               help='List LABELs of MACHINE',
                               type='string',
                               metavar='MACHINE')

    def parse(self):
        """Parse options; enforces mutually exclusive display modes and a
        single --machine value."""
        host_info = topic_common.item_parse_info(attribute_name='hosts',
                                                 inline_option='machine')

        (options, leftover) = super(label_list, self).parse([host_info])

        # --all, --platform-only and --atomicgroup are mutually exclusive.
        exclusives = [options.all, options.platform_only, options.atomicgroup]
        if exclusives.count(True) > 1:
            self.invalid_syntax('Only specify one of --all,'
                                '--platform, --atomicgroup')

        # Listing labels for multiple machines is the job of 'atest host list'.
        if len(self.hosts) > 1:
            self.invalid_syntax(('Only one machine name allowed. '
                                 '''Use '%s host list %s' '''
                                 'instead.') %
                                (sys.argv[0], ','.join(self.hosts)))
        self.all = options.all
        self.atomicgroup = options.atomicgroup
        self.platform_only = options.platform_only
        self.valid_only = options.valid_only
        return (options, leftover)

    def execute(self):
        """Run the RPC, filtering by host and/or label names when given."""
        filters = {}
        check_results = {}
        if self.hosts:
            filters['host__hostname__in'] = self.hosts
            check_results['host__hostname__in'] = None

        if self.labels:
            filters['name__in'] = self.labels
            check_results['name__in'] = 'name'

        return super(label_list, self).execute(op='get_labels',
                                               filters=filters,
                                               check_results=check_results)

    def output(self, results):
        """Filter results per the chosen display mode and pick the columns."""
        if self.valid_only:
            results = [label for label in results
                       if not label['invalid']]

        if self.platform_only:
            results = [label for label in results
                       if label['platform']]
            keys = ['name', 'invalid']
        elif self.atomicgroup:
            results = [label for label in results
                       if label['atomic_group']]
            keys = ['name', 'atomic_group.name', 'invalid']
        elif not self.all:
            # Default view: non-platform labels only.
            results = [label for label in results
                       if not label['platform']]
            keys = ['name', 'only_if_needed', 'invalid']
        else:
            keys = ['name', 'platform', 'only_if_needed', 'invalid']

        super(label_list, self).output(results, keys)
class label_create(action_common.atest_create, label):
    """atest label create <labels>|--blist <file> --platform"""

    def __init__(self):
        super(label_create, self).__init__()
        self.parser.add_option('-t', '--platform',
                               help='To create this label as a platform',
                               default=False,
                               action='store_true')
        self.parser.add_option('-o', '--only_if_needed',
                               # Fixed: the help text had an unterminated quote.
                               help='To mark the label as "only use if needed"',
                               default=False,
                               action='store_true')

    def parse(self):
        """Parse options and stash the creation payload in self.data."""
        (options, leftover) = super(label_create,
                                    self).parse(req_items='labels')
        # Labels are keyed by name in the RPC payload.
        self.data_item_key = 'name'
        self.data['platform'] = options.platform
        self.data['only_if_needed'] = options.only_if_needed
        return (options, leftover)
class label_delete(action_common.atest_delete, label):
    """atest label delete <labels>|--blist <file>"""
    # All behavior is inherited from atest_delete; nothing to override.
    pass
class label_add_or_remove(label):
    """Shared machinery for 'label add' and 'label remove': both need a set
    of machines (via --machine/--mlist) to apply the label change to.

    NOTE(review): self.usage_words is presumably supplied by the
    atest_add/atest_remove mixin in the concrete subclasses — confirm.
    """

    def __init__(self):
        super(label_add_or_remove, self).__init__()
        # Lower-cased copies of the usage verbs for option help texts.
        lower_words = tuple(word.lower() for word in self.usage_words)
        self.parser.add_option('-m', '--machine',
                               help=('%s MACHINE(s) %s the LABEL' %
                                     self.usage_words),
                               type='string',
                               metavar='MACHINE')
        self.parser.add_option('-M', '--mlist',
                               help='File containing machines to %s %s '
                               'the LABEL' % lower_words,
                               type='string',
                               metavar='MACHINE_FLIST')

    def parse(self):
        """Parse options; requires at least one label and one host."""
        host_info = topic_common.item_parse_info(attribute_name='hosts',
                                                 inline_option='machine',
                                                 filename_option='mlist')

        (options, leftover) = super(label_add_or_remove,
                                    self).parse([host_info],
                                                req_items='labels')

        if not getattr(self, 'hosts', None):
            self.invalid_syntax('%s %s requires at least one host' %
                                (self.msg_topic,
                                 self.usage_action))
        return (options, leftover)
class label_add(action_common.atest_add, label_add_or_remove):
    """atest label add <labels>|--blist <file>
    --platform [--machine <machine>] [--mlist <file>]"""
    # Behavior comes entirely from atest_add + label_add_or_remove.
    pass
class label_remove(action_common.atest_remove, label_add_or_remove):
    """atest label remove <labels>|--blist <file>
    [--machine <machine>] [--mlist <file>]"""
    # Behavior comes entirely from atest_remove + label_add_or_remove.
    pass
| gpl-2.0 |
kvnn/btcimg | btcimg/apps/locker/views.py | 1 | 1632 | import requests
from decimal import Decimal
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views import generic
from .models import Asset
from .forms import AssetForm
def _satoshi_to_bitcoin(num_satoshi):
return Decimal(0.00000001 * num_satoshi)
class AssetView(generic.View):
    """Public detail page for a locked Asset; the displayed image depends on
    how much BTC the asset's address has received so far."""

    def _get_btc_received(self, asset):
        """Return the total BTC received at the asset's address as a Decimal,
        or 0 when the blockchain.info lookup fails or lacks the field.

        (The original docstring claimed 'Decimal or None', but the code has
        always returned 0 on failure.)
        """
        url = 'https://blockchain.info/address/%s?format=json&limit=0' % asset.btc_address
        req = requests.get(url)
        if req.status_code == 200:
            # Parse the body once instead of calling .json() twice.
            payload = req.json()
            if 'total_received' in payload:
                return _satoshi_to_bitcoin(payload['total_received'])
        return 0

    def get(self, request, *args, **kwargs):
        asset = get_object_or_404(Asset, slug=kwargs['slug'])
        btc_received = self._get_btc_received(asset)
        # Amount still needed before the asset unlocks (can go negative
        # once fully funded).
        btc_left = asset.unlock_value - btc_received
        data = {
            'asset': asset,
            'btc_left': btc_left,
            'public_image': asset.get_public_image(btc_received),
            'btc_received': btc_received,
        }
        return render(request, 'locker/detail.html', data)
class AssetCreate(generic.edit.CreateView):
    """Form view for creating a new Asset."""
    model = Asset
    form_class = AssetForm

    def form_valid(self, form):
        if self.request.user.is_authenticated():
            # NOTE(review): this assigns into form.fields (the field
            # registry), not form.instance.owner / cleaned_data — confirm
            # the owner actually gets persisted this way.
            form.fields['owner'] = self.request.user.id
        return super(AssetCreate, self).form_valid(form)

    def get_success_url(self):
        """Redirect to the newly created asset's detail page."""
        return self.object.get_absolute_url()
class AssetList(generic.TemplateView):
    """Template-only listing page for assets."""
    template_name = 'locker/list.html'
| gpl-2.0 |
haggi/OpenMaya | src/mayaToCorona/mtco_devmodule/scripts/Corona/AETemplate/AECoronaFrontBackTemplate.py | 1 | 1956 | import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
    """Thin convenience wrapper around pymel's AETemplate used by the
    Corona attribute-editor templates."""

    def addControl(self, control, label=None, **kwargs):
        # Pass-through keeping call sites uniform.
        pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)

    def beginLayout(self, name, collapse=True):
        # Pass-through with an explicit default for 'collapse'.
        pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
class AECoronaFrontBackTemplate(BaseTemplate):
    """Maya Attribute Editor template for the Corona front/back material
    node: exposes frontMaterial, backMaterial and opacity; everything else
    is suppressed and only reachable under "Extra Controls"."""

    def __init__(self, nodeName):
        BaseTemplate.__init__(self,nodeName)
        # NOTE(review): message names the *light* template — likely a
        # copy-paste leftover; confirm.
        log.debug("AECoronaLightTemplate")
        self.thisNode = None
        self.node = pm.PyNode(self.nodeName)
        pm.mel.AEswatchDisplay(nodeName)
        self.beginScrollLayout()
        self.buildBody(nodeName)
        # Hide every attribute except the whitelist below.
        allAttributes = self.node.listAttr()
        allowedAttributes = ["frontMaterial", "backMaterial", "opacity"]
        for att in allAttributes:
            att = att.split(".")[-1]
            if not att in allowedAttributes:
                self.suppress(att)
        self.addExtraControls("ExtraControls")
        self.endScrollLayout()

    def buildBody(self, nodeName):
        self.thisNode = pm.PyNode(nodeName)
        # NOTE(review): layout title "Emission" and the iesProfile /
        # emissionSharpnessFake controls look copied from the light-material
        # template, and those attributes are suppressed in __init__ —
        # confirm they are intended here.
        self.beginLayout("Emission" ,collapse=0)
        self.beginNoOptimize()
        self.addControl("frontMaterial", label="Front Material")
        self.addControl("backMaterial", label="Back Material")
        self.addSeparator()
        self.addControl("iesProfile", label="IES Profile")
        self.addControl("emissionSharpnessFake", label="Sharp Patterns")
        #self.addControl("emissionDisableSampling", label="Disable Sampling")
        #self.addControl("emissionSharpnessFakePoint", label="Sharpness Fake Point")
        self.endNoOptimize()
        self.endLayout()
        #self.beginLayout("Hardware Texturing" ,collapse=0)
        #pm.mel.eval('AEhardwareTextureTemplate "%s"' % self.nodeName + r'("diffuse emissionColor ")')
        #self.endLayout()
| mit |
TeamPurple/Cyber | Yowsup/connectionmanager.py | 2 | 49068 | '''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from Yowsup.ConnectionIO.protocoltreenode import ProtocolTreeNode
from Yowsup.ConnectionIO.ioexceptions import ConnectionClosedException
from Yowsup.ConnectionIO.connectionengine import ConnectionEngine
from Yowsup.Common.utilities import Utilities
from Yowsup.Common.debugger import Debugger
import threading, select, time
from Yowsup.Common.watime import WATime
from .Auth.auth import YowsupAuth
from Yowsup.Common.constants import Constants
from Yowsup.Interfaces.Lib.LibInterface import LibMethodInterface, LibSignalInterface
import tempfile
from random import randrange
import socket
import hashlib
import base64
import sys
import traceback
class YowsupConnectionManager:
def __init__(self):
    """Set up default interfaces, the reader thread and connection state."""
    Debugger.attach(self)

    self.currKeyId = 1      # id counter for outgoing keyed stanzas
    self.iqId = 0           # incrementing id for iq requests
    self.verbose = True
    # Connection state: 0 = disconnected, 1 = connecting/authenticating,
    # 2 = connected (see auth() and _writeNode()).
    self.state = 0
    self.lock = threading.Lock()
    self.autoPong = True    # reader auto-replies to server pings when True

    self.domain = "s.whatsapp.net"
    #self.methodInterface = MethodInterface(authenticatedSocketConnection.getId())
    #self.signalInterface = SignalInterface(authenticatedSocketConnection.getId())
    self.readerThread = None

    # Default in-process interfaces; may be replaced via setInterfaces().
    self.methodInterface = LibMethodInterface()
    self.signalInterface = LibSignalInterface()
    self.readerThread = ReaderThread()
    self.readerThread.setSignalInterface(self.signalInterface)
    self.bindMethods()
def setInterfaces(self, signalInterface, methodInterface):
    """Swap in externally supplied method/signal interfaces and re-bind
    all method callbacks onto the new method interface."""
    self.methodInterface = methodInterface
    self.signalInterface = signalInterface
    self.readerThread.setSignalInterface(self.signalInterface)
    self.bindMethods()
def getSignalsInterface(self):
    """Return the signal (event) interface for this connection."""
    return self.signalInterface
def getMethodsInterface(self):
    """Return the method (command) interface for this connection."""
    return self.methodInterface
def setAutoPong(self, autoPong):
    """Enable/disable automatic ping replies, kept in sync on the reader."""
    self.autoPong = self.readerThread.autoPong = autoPong
def startReader(self):
    """Start the reader thread; return 1 on success, 0 if it is already
    running (or was already run once)."""
    if self.readerThread.isAlive():
        self._d("Reader already started")
        return 0

    self._d("starting reader")
    try:
        self.readerThread.start()
        self._d("started")
    except RuntimeError:
        # Python threads can only be started once; signal a disconnect so
        # callers are not left waiting on a reader that will never run.
        self._d("Reader already started before")
        self.readerThread.sendDisconnected()
        return 0

    return 1
def block(self):
    """Block the calling thread until the reader thread exits."""
    self.readerThread.join()
def bindMethods(self):
    """Register every public method-interface callback: messaging, receipts,
    keepalive, groups, pictures, presence, profile, connection control and
    media upload."""
    self.methodInterface.registerCallback("getVersion", lambda: Constants.v)

    # -- outgoing messages ------------------------------------------------
    self.methodInterface.registerCallback("message_send",self.sendText)
    self.methodInterface.registerCallback("message_imageSend",self.sendImage)
    self.methodInterface.registerCallback("message_audioSend",self.sendAudio)
    self.methodInterface.registerCallback("message_videoSend",self.sendVideo)
    self.methodInterface.registerCallback("message_locationSend",self.sendLocation)
    self.methodInterface.registerCallback("message_vcardSend",self.sendVCard)
    self.methodInterface.registerCallback("message_broadcast",self.sendBroadcast)

    # -- receipts / acks --------------------------------------------------
    self.methodInterface.registerCallback("message_ack",self.sendMessageReceipt)
    self.methodInterface.registerCallback("notification_ack", self.sendNotificationReceipt)
    self.methodInterface.registerCallback("clientconfig_send",self.sendClientConfig)
    self.methodInterface.registerCallback("delivered_ack",self.sendDeliveredReceiptAck)
    self.methodInterface.registerCallback("visible_ack",self.sendVisibleReceiptAck)

    # -- keepalive & typing state ----------------------------------------
    self.methodInterface.registerCallback("ping",self.sendPing)
    self.methodInterface.registerCallback("pong",self.sendPong)
    self.methodInterface.registerCallback("typing_send",self.sendTyping)
    self.methodInterface.registerCallback("typing_paused",self.sendPaused)
    self.methodInterface.registerCallback("subject_ack",self.sendSubjectReceived)

    # -- group management -------------------------------------------------
    self.methodInterface.registerCallback("group_getGroups", self.sendGetGroups)
    self.methodInterface.registerCallback("group_getInfo",self.sendGetGroupInfo)
    self.methodInterface.registerCallback("group_create",self.sendCreateGroupChat)
    self.methodInterface.registerCallback("group_addParticipants",self.sendAddParticipants)
    self.methodInterface.registerCallback("group_removeParticipants",self.sendRemoveParticipants)
    self.methodInterface.registerCallback("group_end",self.sendEndGroupChat)
    self.methodInterface.registerCallback("group_setSubject",self.sendSetGroupSubject)
    self.methodInterface.registerCallback("group_setPicture", self.sendSetPicture)
    self.methodInterface.registerCallback("group_getPicture", self.sendGetPicture)
    self.methodInterface.registerCallback("group_getParticipants",self.sendGetParticipants)

    # -- pictures & status ------------------------------------------------
    self.methodInterface.registerCallback("picture_get",self.sendGetPicture)
    self.methodInterface.registerCallback("picture_getIds",self.sendGetPictureIds)
    self.methodInterface.registerCallback("contact_getProfilePicture", self.sendGetPicture)
    self.methodInterface.registerCallback("status_update",self.sendChangeStatus)

    # -- presence ---------------------------------------------------------
    self.methodInterface.registerCallback("presence_request",self.getLastOnline)
    #self.methodInterface.registerCallback("presence_unsubscribe",self.sendUnsubscribe)#@@TODO implement method
    self.methodInterface.registerCallback("presence_subscribe",self.sendSubscribe)
    self.methodInterface.registerCallback("presence_sendAvailableForChat",self.sendAvailableForChat)
    self.methodInterface.registerCallback("presence_sendAvailable",self.sendAvailable)
    self.methodInterface.registerCallback("presence_sendUnavailable",self.sendUnavailable)

    # -- own profile ------------------------------------------------------
    self.methodInterface.registerCallback("profile_setPicture", self.sendSetProfilePicture)
    self.methodInterface.registerCallback("profile_getPicture", self.sendGetProfilePicture)
    self.methodInterface.registerCallback("profile_setStatus", self.sendChangeStatus)

    # -- connection lifecycle & media -------------------------------------
    self.methodInterface.registerCallback("disconnect", self.disconnect)
    self.methodInterface.registerCallback("ready", self.startReader)
    self.methodInterface.registerCallback("auth_login", self.auth )
    #self.methodInterface.registerCallback("auth_login", self.auth)
    self.methodInterface.registerCallback("media_requestUpload", self.sendRequestUpload)
	def disconnect(self, reason=""):
		"""Tear down the connection: stop the reader thread, close the socket,
		reset state and notify listeners with the given reason."""
		self._d("Disconnect sequence initiated")
		self._d("Sending term signal to reader thread")
		# Stop the reader before closing the socket it polls
		if self.readerThread.isAlive():
			self.readerThread.terminate()
		self._d("Shutting down socket")
		self.socket.close()
		self._d("Waiting for readerThread to die")
		self.readerThread.join()
		self._d("Disconnected!")
		self._d(reason)
		self.state = 0	# 0 = disconnected (1 = connecting, 2 = connected; see auth/_writeNode)
		self.readerThread.sendDisconnected(reason)
def getConnection(self):
return self.socket
def triggerEvent(self, eventName, stanza):
if eventName in self.events and self.events[eventName] is not None:
self.events[eventName](stanza)
def bindEvent(self, eventName, callback):
if eventName in self.events:
self.events[eventName] = callback
##########################################################
	def _writeNode(self, node):
		"""Write a protocol tree node to the wire.

		Returns True on success; returns False when not connected (state != 2)
		or when the write fails, in which case the reader thread is torn down
		inline and a disconnect is signalled.
		"""
		if self.state == 2:	# only write while fully connected
			try:
				self.out.write(node)
				return True
			except ConnectionClosedException:
				self._d("CONNECTION DOWN")
				#self.disconnect("closed")
				# Inline teardown instead of disconnect(): the socket is already
				# gone, so only the reader thread needs stopping and notifying.
				if self.readerThread.isAlive():
					self.readerThread.terminate()
				self.readerThread.join()
				self.readerThread.sendDisconnected("closed")
		return False
def onDisconnected(self):
self._d("Setting state to 0")
self.state = 0
	def auth(self, username, password):
		"""Authenticate against the server; on success wire up the reader
		thread and output writer and emit auth_success.

		Returns 0 on every failure path (DNS error, socket error, connection
		closed, invalid credentials), emitting the matching signal.
		State transitions: 0 (idle) -> 1 (connecting) -> 2 (connected).

		NOTE(review): self.lock is acquired unconditionally but only released
		inside the `state == 0` branch — a call while already connected would
		leave the lock held; confirm callers never do that.
		"""
		self._d(">>>>>>>> AUTH CALLED")
		username = str(username)
		#password = str(password)
		#traceback.print_stack()
		self.lock.acquire()
		if self.state == 0 :
			# A live reader thread here means a previous session never shut down
			if self.readerThread.isAlive():
				raise Exception("TWO READER THREADS ON BOARD!!")
			self.readerThread = ReaderThread()
			self.readerThread.autoPong = self.autoPong
			self.readerThread.setSignalInterface(self.signalInterface)
			yAuth = YowsupAuth(ConnectionEngine())
			try:
				self.state = 1
				tokenData = Utilities.readToken()
				# Fall back to the built-in resource string when no token file exists
				resource = tokenData["r"] if tokenData else Constants.tokenData["r"]
				connection = yAuth.authenticate(username, password, Constants.domain, resource)
			except socket.gaierror:
				self._d("DNS ERROR")
				self.readerThread.sendDisconnected("dns")
				#self.signalInterface.send("disconnected", ("dns",))
				self.lock.release()
				self.state = 0
				return 0
			except socket.error:
				self._d("Socket error, connection timed out")
				self.readerThread.sendDisconnected("closed")
				#self.signalInterface.send("disconnected", ("closed",))
				self.lock.release()
				self.state = 0
				return 0
			except ConnectionClosedException:
				self._d("Conn closed Exception")
				self.readerThread.sendDisconnected("closed")
				#self.signalInterface.send("disconnected", ("closed",))
				self.lock.release()
				self.state = 0
				return 0
			if not connection:
				self.state = 0
				self.signalInterface.send("auth_fail", (username, "invalid"))
				self.lock.release()
				return 0
			self.state = 2
			self.socket = connection
			self.jid = self.socket.jid
			#@@TODO REPLACE PROPERLY
			self.out = self.socket.writer
			# Hand the connected socket to the reader and hook up its callbacks
			self.readerThread.setSocket(self.socket)
			self.readerThread.disconnectedCallback = self.onDisconnected
			self.readerThread.onPing = self.sendPong
			self.readerThread.ping = self.sendPing
			self.signalInterface.send("auth_success", (username,))
			self.lock.release()
def sendTyping(self,jid):
self._d("SEND TYPING TO JID")
composing = ProtocolTreeNode("composing",{"xmlns":"http://jabber.org/protocol/chatstates"})
message = ProtocolTreeNode("message",{"to":jid,"type":"chat"},[composing]);
self._writeNode(message);
def sendPaused(self,jid):
self._d("SEND PAUSED TO JID")
composing = ProtocolTreeNode("paused",{"xmlns":"http://jabber.org/protocol/chatstates"})
message = ProtocolTreeNode("message",{"to":jid,"type":"chat"},[composing]);
self._writeNode(message);
def getSubjectMessage(self,to,msg_id,child):
messageNode = ProtocolTreeNode("message",{"to":to,"type":"subject","id":msg_id},[child]);
return messageNode
def sendSubjectReceived(self,to,msg_id):
self._d("Sending subject recv receipt")
receivedNode = ProtocolTreeNode("received",{"xmlns": "urn:xmpp:receipts"});
messageNode = self.getSubjectMessage(to,msg_id,receivedNode);
self._writeNode(messageNode);
def sendMessageReceipt(self, jid, msgId):
self.sendReceipt(jid, "chat", msgId)
def sendNotificationReceipt(self, jid, notificationId):
self.sendReceipt(jid, "notification", notificationId)
def sendReceipt(self,jid,mtype,mid):
self._d("sending message received to "+jid+" - type:"+mtype+" - id:"+mid)
receivedNode = ProtocolTreeNode("received",{"xmlns": "urn:xmpp:receipts"})
messageNode = ProtocolTreeNode("message",{"to":jid,"type":mtype,"id":mid},[receivedNode]);
self._writeNode(messageNode);
def sendDeliveredReceiptAck(self,to,msg_id):
self._writeNode(self.getReceiptAck(to,msg_id,"delivered"));
def sendVisibleReceiptAck(self,to,msg_id):
self._writeNode(self.getReceiptAck(to,msg_id,"visible"));
def getReceiptAck(self,to,msg_id,receiptType):
ackNode = ProtocolTreeNode("ack",{"xmlns":"urn:xmpp:receipts","type":receiptType})
messageNode = ProtocolTreeNode("message",{"to":to,"type":"chat","id":msg_id},[ackNode]);
return messageNode;
def makeId(self,prefix):
self.iqId += 1
idx = ""
if self.verbose:
idx += prefix + str(self.iqId);
else:
idx = "%x" % self.iqId
return idx
def sendPing(self):
idx = self.makeId("ping_")
self.readerThread.requests[idx] = self.readerThread.parsePingResponse;
pingNode = ProtocolTreeNode("ping",{"xmlns":"w:p"});
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get","to":self.domain},[pingNode]);
self._writeNode(iqNode);
return idx
def sendPong(self,idx):
iqNode = ProtocolTreeNode("iq",{"type":"result","to":self.domain,"id":idx})
self._writeNode(iqNode);
def getLastOnline(self,jid):
print 'In getLastOnline'
if len(jid.split('-')) == 2 or jid == "Server@s.whatsapp.net": #SUPER CANCEL SUBSCRIBE TO GROUP AND SERVER
return
self.sendSubscribe(jid);
self._d("presence request Initiated for %s"%(jid))
idx = self.makeId("last_")
self.readerThread.requests[idx] = self.readerThread.parseLastOnline
query = ProtocolTreeNode("query",{"xmlns":"jabber:iq:last"})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get","to":jid},[query])
self._writeNode(iqNode)
def sendIq(self):
node = ProtocolTreeNode("iq",{"to":"g.us","type":"get","id":str(int(time.time()))+"-0"},None,'expired');
self._writeNode(node);
node = ProtocolTreeNode("iq",{"to":"s.whatsapp.net","type":"set","id":str(int(time.time()))+"-1"},None,'expired');
self._writeNode(node);
def sendAvailableForChat(self, pushname):
presenceNode = ProtocolTreeNode("presence",{"name":pushname})
self._writeNode(presenceNode);
def sendAvailable(self):
presenceNode = ProtocolTreeNode("presence",{"type":"available"})
self._writeNode(presenceNode);
def sendUnavailable(self):
presenceNode = ProtocolTreeNode("presence",{"type":"unavailable"})
self._writeNode(presenceNode);
def sendSubscribe(self,to):
presenceNode = ProtocolTreeNode("presence",{"type":"subscribe","to":to});
self._writeNode(presenceNode);
	def mediaNode(fn):
		"""Decorator: the wrapped method returns a media *type* string; build
		the corresponding <media> node from the call arguments.

		Expected positional args: jid, url, name, size, then an optional
		preview payload as the fifth argument.
		"""
		def wrapped(self, *args):
			mediaType = fn(self, *args)
			url = args[1]
			name = args[2]
			size = args[3]
			# args[4:][0] is the optional preview blob (None when absent)
			mmNode = ProtocolTreeNode("media", {"xmlns":"urn:xmpp:whatsapp:mms","type":mediaType,"file":name,"size":size,"url":url},None, args[4:][0] if args[4:] else None);
			return mmNode
		return wrapped
	def sendMessage(fn):
		"""Decorator: the wrapped method returns the message child node(s);
		wrap them in a chat message stanza, send it and return the stanza id.

		A list as the first argument means a broadcast ("broadcast" jid).
		"""
		def wrapped(self, *args):
			node = fn(self, *args)
			jid = "broadcast" if type(args[0]) == list else args[0]
			messageNode = self.getMessageNode(jid, node)
			self._writeNode(messageNode);
			return messageNode.getAttributeValue("id")
		return wrapped
def sendChangeStatus(self,status):
self._d("updating status to: %s"%(status))
bodyNode = ProtocolTreeNode("body",None,None,status);
messageNode = self.getMessageNode("s.us",bodyNode)
self._writeNode(messageNode);
return messageNode.getAttributeValue("id")
	@sendMessage
	def sendText(self,jid, content):
		"""Send a plain-text message to jid; returns the message id (via decorator)."""
		return ProtocolTreeNode("body",None,None,content);
	@sendMessage
	@mediaNode
	def sendImage(self, jid, url, name, size, preview):
		"""Send an image media message; returns the message id (via decorators)."""
		return "image"
	@sendMessage
	@mediaNode
	def sendVideo(self, jid, url, name, size, preview):
		"""Send a video media message; returns the message id (via decorators)."""
		return "video"
	@sendMessage
	@mediaNode
	def sendAudio(self, jid, url, name, size):
		"""Send an audio media message (no preview); returns the message id."""
		return "audio"
	@sendMessage
	def sendLocation(self, jid, latitude, longitude, preview):
		"""Send a location message (latitude/longitude as strings); returns the message id."""
		self._d("sending location (" + latitude + ":" + longitude + ")")
		return ProtocolTreeNode("media", {"xmlns":"urn:xmpp:whatsapp:mms","type":"location","latitude":latitude,"longitude":longitude},None,preview)
	@sendMessage
	def sendVCard(self, jid, data, name):
		"""Send a vCard contact message; returns the message id (via decorator)."""
		cardNode = ProtocolTreeNode("vcard",{"name":name},None,data);
		return ProtocolTreeNode("media", {"xmlns":"urn:xmpp:whatsapp:mms","type":"vcard"},[cardNode])
	@sendMessage
	def sendBroadcast(self, jids, content):
		"""Send a text message to several jids as a broadcast; returns the message id."""
		broadcastNode = ProtocolTreeNode("broadcast", None, [ProtocolTreeNode("to", {"jid": jid}) for jid in jids])
		messageNode = ProtocolTreeNode("body",None,None,content);
		return [broadcastNode, messageNode]
def sendClientConfig(self,sound,pushID,preview,platform):
idx = self.makeId("config_");
configNode = ProtocolTreeNode("config",{"xmlns":"urn:xmpp:whatsapp:push","sound":sound,"id":pushID,"preview":"1" if preview else "0","platform":platform})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":self.domain},[configNode]);
self._writeNode(iqNode);
# gtype should be either "participating" or "owning"
def sendGetGroups(self,gtype):
self._d("getting groups %s"%(gtype))
idx = self.makeId("get_groups_")
self.readerThread.requests[idx] = self.readerThread.parseGroups;
queryNode = ProtocolTreeNode("list",{"xmlns":"w:g","type":gtype})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get","to":"g.us"},[queryNode])
self._writeNode(iqNode)
def sendGetGroupInfo(self,jid):
self._d("getting group info for %s"%(jid))
idx = self.makeId("get_g_info_")
self.readerThread.requests[idx] = self.readerThread.parseGroupInfo;
queryNode = ProtocolTreeNode("query",{"xmlns":"w:g"})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get","to":jid},[queryNode])
self._writeNode(iqNode)
def sendCreateGroupChat(self,subject):
self._d("creating group: %s"%(subject))
idx = self.makeId("create_group_")
self.readerThread.requests[idx] = self.readerThread.parseGroupCreated;
queryNode = ProtocolTreeNode("group",{"xmlns":"w:g","action":"create","subject":subject})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":"g.us"},[queryNode])
self._writeNode(iqNode)
def sendAddParticipants(self, gjid, participants):
self._d("opening group: %s"%(gjid))
self._d("adding participants: %s"%(participants))
idx = self.makeId("add_group_participants_")
self.readerThread.requests[idx] = self.readerThread.parseAddedParticipants;
innerNodeChildren = []
for part in participants:
innerNodeChildren.append( ProtocolTreeNode("participant",{"jid":part}) )
queryNode = ProtocolTreeNode("add",{"xmlns":"w:g"},innerNodeChildren)
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":gjid},[queryNode])
self._writeNode(iqNode)
def sendRemoveParticipants(self,gjid, participants):
self._d("opening group: %s"%(gjid))
self._d("removing participants: %s"%(participants))
idx = self.makeId("remove_group_participants_")
self.readerThread.requests[idx] = self.readerThread.parseRemovedParticipants;
innerNodeChildren = []
for part in participants:
innerNodeChildren.append( ProtocolTreeNode("participant",{"jid":part}) )
queryNode = ProtocolTreeNode("remove",{"xmlns":"w:g"},innerNodeChildren)
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":gjid},[queryNode])
self._writeNode(iqNode)
def sendEndGroupChat(self,gjid):
self._d("removing group: %s"%(gjid))
idx = self.makeId("leave_group_")
self.readerThread.requests[idx] = self.readerThread.parseGroupEnded;
innerNodeChildren = []
innerNodeChildren.append( ProtocolTreeNode("group",{"id":gjid}) )
queryNode = ProtocolTreeNode("leave",{"xmlns":"w:g"},innerNodeChildren)
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":"g.us"},[queryNode])
self._writeNode(iqNode)
def sendSetGroupSubject(self,gjid,subject):
#subject = subject.encode('utf-8')
#self._d("setting group subject of " + gjid + " to " + subject)
idx = self.makeId("set_group_subject_")
self.readerThread.requests[idx] = self.readerThread.parseGroupSubject
queryNode = ProtocolTreeNode("subject",{"xmlns":"w:g","value":subject})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"set","to":gjid},[queryNode]);
self._writeNode(iqNode)
def sendGetParticipants(self,jid):
idx = self.makeId("get_participants_")
self.readerThread.requests[idx] = self.readerThread.parseParticipants
listNode = ProtocolTreeNode("list",{"xmlns":"w:g"})
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get","to":jid},[listNode]);
self._writeNode(iqNode)
def sendGetPicture(self,jid):
self._d("GETTING PICTURE FROM " + jid)
idx = self.makeId("get_picture_")
#@@TODO, ?!
self.readerThread.requests[idx] = self.readerThread.parseGetPicture
listNode = ProtocolTreeNode("picture",{"xmlns":"w:profile:picture","type":"image"})
iqNode = ProtocolTreeNode("iq",{"id":idx,"to":jid,"type":"get"},[listNode]);
self._writeNode(iqNode)
def sendGetPictureIds(self,jids):
idx = self.makeId("get_picture_ids_")
self.readerThread.requests[idx] = self.readerThread.parseGetPictureIds
innerNodeChildren = []
for jid in jids:
innerNodeChildren.append( ProtocolTreeNode("user",{"jid": jid}) )
queryNode = ProtocolTreeNode("list",{"xmlns":"w:profile:picture"},innerNodeChildren)
iqNode = ProtocolTreeNode("iq",{"id":idx,"type":"get"},[queryNode])
self._writeNode(iqNode)
def sendGetProfilePicture(self):
return self.sendGetPicture(self.jid)
def sendSetProfilePicture(self, filepath):
return self.sendSetPicture(self.jid, filepath)
def sendSetPicture(self, jid, imagePath):
f = open(imagePath, 'rb')
imageData = f.read()
imageData = bytearray(imageData)
f.close()
idx = self.makeId("set_picture_")
self.readerThread.requests[idx] = self.readerThread.parseSetPicture
listNode = ProtocolTreeNode("picture",{"xmlns":"w:profile:picture","type":"image"}, None, imageData)
iqNode = ProtocolTreeNode("iq",{"id":idx,"to":jid,"type":"set"},[listNode])
self._writeNode(iqNode)
def sendRequestUpload(self, b64Hash, t, size, b64OrigHash = None):
idx = self.makeId("upload_")
self.readerThread.requests[idx] = lambda iqresnode: self.readerThread.parseRequestUpload(iqresnode, b64Hash)
if type(size) is not str:
size = str(size)
attribs = {"xmlns":"w:m","hash":b64Hash, "type":t, "size":size}
if b64OrigHash:
attribs["orighash"] = b64OrigHash
mediaNode = ProtocolTreeNode("media", attribs)
iqNode = ProtocolTreeNode("iq",{"id":idx,"to":"s.whatsapp.net","type":"set"},[mediaNode])
self._writeNode(iqNode)
def getMessageNode(self, jid, child):
requestNode = None;
serverNode = ProtocolTreeNode("server",None);
xNode = ProtocolTreeNode("x",{"xmlns":"jabber:x:event"},[serverNode]);
childCount = (0 if requestNode is None else 1) +2;
messageChildren = []#[None]*childCount;
if requestNode is not None:
messageChildren.append(requestNode);
#System.currentTimeMillis() / 1000L + "-"+1
messageChildren.append(xNode)
if type(child) == list:
messageChildren.extend(child)
else:
messageChildren.append(child)
msgId = str(int(time.time()))+"-"+ str(self.currKeyId)
messageNode = ProtocolTreeNode("message",{"to":jid,"type":"chat","id":msgId},messageChildren)
self.currKeyId += 1
return messageNode;
class ReaderThread(threading.Thread):
	def __init__(self):
		"""Set up reader-thread state; the socket is attached later via setSocket()."""
		Debugger.attach(self);
		self.signalInterface = None
		#self.socket = connection
		self.terminateRequested = False	# set by terminate() to request a graceful exit
		self.disconnectedSent = False	# guards against duplicate 'disconnected' signals
		self.timeout = 180	# seconds without traffic before the watchdog gives up
		self.selectTimeout = 3	# poll interval for select() in run()
		self.requests = {};	# stanza id -> response-parser callback
		self.lock = threading.Lock()
		self.disconnectedCallback = None
		self.autoPong = True
		self.onPing = self.ping = None	# wired up by the connection manager after auth
		self.lastPongTime = int(time.time())
		super(ReaderThread,self).__init__();
		self.daemon = True
def setSocket(self, connection):
self.socket = connection
def setSignalInterface(self, signalInterface):
self.signalInterface = signalInterface
def terminate(self):
self._d("attempting to exit gracefully")
self.terminateRequested = True
def sendDisconnected(self, reason="noreason"):
self._d("Sending disconnected because of %s" % reason)
self.lock.acquire()
if not self.disconnectedSent:
self.disconnectedSent = True
if self.disconnectedCallback:
self.disconnectedCallback()
self.lock.release()
self.signalInterface.send("disconnected", (reason,))
	def run(self):
		"""Main reader loop: poll the socket, dispatch incoming stanzas and
		watch the ping/pong deadline.

		Exits when terminate() was requested, the watchdog expires, or the
		connection is closed by the peer.
		"""
		self._d("Read thread startedX");
		while True:
			# Watchdog: time left before the connection is declared dead,
			# rounded down to a multiple of the select timeout.
			countdown = self.timeout - ((int(time.time()) - self.lastPongTime))
			remainder = countdown % self.selectTimeout
			countdown = countdown - remainder
			if countdown <= 0:
				self._d("No hope, dying!")
				self.sendDisconnected("closed")
				return
			else:
				if countdown % (self.selectTimeout*10) == 0 or countdown < 11:
					self._d("Waiting, time to die: T-%i seconds" % countdown )
				# Proactively ping while there is still time to get a pong back
				if countdown < 150 and self.ping and self.autoPong:
					self.ping()
				self.selectTimeout = 1 if countdown < 11 else 3
			try:
				ready = select.select([self.socket.reader.rawIn], [], [], self.selectTimeout)
			except:
				self._d("Error in ready")
				raise
				# NOTE(review): unreachable — `raise` above always exits this branch
				return
			if self.terminateRequested:
				return
			if ready[0]:
				try:
					node = self.socket.reader.nextTree()
				except ConnectionClosedException:
					#print traceback.format_exc()
					self._d("Socket closed, got 0 bytes!")
					#self.signalInterface.send("disconnected", ("closed",))
					self.sendDisconnected("closed")
					return
				# Any successfully read stanza counts as liveness
				self.lastPongTime = int(time.time());
				if node is not None:
					if ProtocolTreeNode.tagEquals(node,"iq"):
						iqType = node.getAttributeValue("type")
						idx = node.getAttributeValue("id")
						if iqType is None:
							raise Exception("iq doesn't have type")
						if iqType == "result":
							# Route results back to the callback registered under their id
							if idx in self.requests:
								self.requests[idx](node)
								del self.requests[idx]
							elif idx.startswith(self.connection.user):
								# NOTE(review): self.connection / self.eventHandler are not
								# assigned anywhere in this class — confirm this branch is
								# reachable, otherwise it raises AttributeError.
								accountNode = node.getChild(0)
								ProtocolTreeNode.require(accountNode,"account")
								kind = accountNode.getAttributeValue("kind")
								if kind == "paid":
									self.connection.account_kind = 1
								elif kind == "free":
									self.connection.account_kind = 0
								else:
									self.connection.account_kind = -1
								expiration = accountNode.getAttributeValue("expiration")
								if expiration is None:
									raise Exception("no expiration")
								try:
									# long() is Python-2 only
									self.connection.expire_date = long(expiration)
								except ValueError:
									raise IOError("invalid expire date %s"%(expiration))
								self.eventHandler.onAccountChanged(self.connection.account_kind,self.connection.expire_date)
						elif iqType == "error":
							# Errors are delivered to the same per-id callback
							if idx in self.requests:
								self.requests[idx](node)
								del self.requests[idx]
						elif iqType == "get":
							childNode = node.getChild(0)
							if ProtocolTreeNode.tagEquals(childNode,"ping"):
								if self.autoPong:
									self.onPing(idx)
								self.signalInterface.send("ping", (idx,))
							elif ProtocolTreeNode.tagEquals(childNode,"query") and node.getAttributeValue("from") is not None and "http://jabber.org/protocol/disco#info" == childNode.getAttributeValue("xmlns"):
								pin = childNode.getAttributeValue("pin");
								timeoutString = childNode.getAttributeValue("timeout");
								try:
									timeoutSeconds = int(timeoutString) if timeoutString is not None else None
								except ValueError:
									raise Exception("relay-iq exception parsing timeout %s "%(timeoutString))
								if pin is not None:
									# NOTE(review): self.eventHandler — see note above
									self.eventHandler.onRelayRequest(pin,timeoutSeconds,idx)
						elif iqType == "set":
							childNode = node.getChild(0)
							if ProtocolTreeNode.tagEquals(childNode,"query"):
								xmlns = childNode.getAttributeValue("xmlns")
								if xmlns == "jabber:iq:roster":
									# Roster pushes are parsed but currently not forwarded
									itemNodes = childNode.getAllChildren("item");
									ask = ""
									for itemNode in itemNodes:
										jid = itemNode.getAttributeValue("jid")
										subscription = itemNode.getAttributeValue("subscription")
										ask = itemNode.getAttributeValue("ask")
						else:
							raise Exception("Unkown iq type %s"%(iqType))
					elif ProtocolTreeNode.tagEquals(node,"presence"):
						xmlns = node.getAttributeValue("xmlns")
						jid = node.getAttributeValue("from")
						if (xmlns is None or xmlns == "urn:xmpp") and jid is not None:
							presenceType = node.getAttributeValue("type")
							if presenceType == "unavailable":
								self.signalInterface.send("presence_unavailable", (jid,))
							elif presenceType is None or presenceType == "available":
								self.signalInterface.send("presence_available", (jid,))
						elif xmlns == "w" and jid is not None:
							status = node.getAttributeValue("status")
							if status == "dirty":
								#categories = self.parseCategories(node); #@@TODO, send along with signal
								self._d("WILL SEND DIRTY")
								self.signalInterface.send("status_dirty")
								self._d("SENT DIRTY")
					elif ProtocolTreeNode.tagEquals(node,"message"):
						self.parseMessage(node)
		self._d("Reader thread terminating now!")
def parseOfflineMessageStamp(self,stamp):
watime = WATime();
parsed = watime.parseIso(stamp)
local = watime.utcToLocal(parsed)
stamp = watime.datetimeToTimestamp(local)
return stamp
def parsePingResponse(self, node):
idx = node.getAttributeValue("id")
self.lastPongTime = int(time.time())
def parseLastOnline(self,node):
jid = node.getAttributeValue("from");
firstChild = node.getChild(0);
if "error" in firstChild.toString():
return
ProtocolTreeNode.require(firstChild,"query");
seconds = firstChild.getAttributeValue("seconds");
status = None
status = firstChild.data #@@TODO discarded?
print 'seconds', seconds
try:
if seconds is not None and jid is not None:
self.signalInterface.send("presence_updated", (jid, int(seconds)))
except:
self._d("Ignored exception in handleLastOnline "+ sys.exc_info()[1])
def parseGroups(self,node):
children = node.getAllChildren("group");
for groupNode in children:
jid = groupNode.getAttributeValue("id") + "@g.us"
owner = groupNode.getAttributeValue("owner")
subject = groupNode.getAttributeValue("subject") if sys.version_info < (3, 0) else groupNode.getAttributeValue("subject").encode('latin-1').decode()
subjectT = groupNode.getAttributeValue("s_t")
subjectOwner = groupNode.getAttributeValue("s_o")
creation = groupNode.getAttributeValue("creation")
self.signalInterface.send("group_gotInfo",(jid, owner, subject, subjectOwner, int(subjectT),int(creation)))
def parseGroupInfo(self,node):
jid = node.getAttributeValue("from");
groupNode = node.getChild(0)
if "error code" in groupNode.toString():
self.signalInterface.send("group_infoError",(0,)) #@@TODO replace with real error code
else:
ProtocolTreeNode.require(groupNode,"group")
#gid = groupNode.getAttributeValue("id")
owner = groupNode.getAttributeValue("owner")
subject = groupNode.getAttributeValue("subject") if sys.version_info < (3, 0) else groupNode.getAttributeValue("subject").encode('latin-1').decode();
subjectT = groupNode.getAttributeValue("s_t")
subjectOwner = groupNode.getAttributeValue("s_o")
creation = groupNode.getAttributeValue("creation")
self.signalInterface.send("group_gotInfo",(jid, owner, subject, subjectOwner, int(subjectT),int(creation)))
def parseAddedParticipants(self, node):
jid = node.getAttributeValue("from");
jids = []
addNodes = node.getAllChildren("add")
for a in addNodes:
t = a.getAttributeValue("type")
if t == "success":
jids.append(a.getAttributeValue("participant"))
else:
self._d("Failed to add %s" % jids.append(a.getAttributeValue("participant")))
self.signalInterface.send("group_addParticipantsSuccess", (jid, jids))
def parseRemovedParticipants(self,node): #fromm, successVector=None,failTable=None
jid = node.getAttributeValue("from");
jids = []
addNodes = node.getAllChildren("remove")
for a in addNodes:
t = a.getAttributeValue("type")
if t == "success":
jids.append(a.getAttributeValue("participant"))
else:
self._d("Failed to add %s" % jids.append(a.getAttributeValue("participant")))
self._d("handleRemovedParticipants DONE!");
self.signalInterface.send("group_removeParticipantsSuccess", (jid, jids))
def parseGroupCreated(self,node):
jid = node.getAttributeValue("from");
groupNode = node.getChild(0)
if ProtocolTreeNode.tagEquals(groupNode,"error"):
errorCode = groupNode.getAttributeValue("code")
self.signalInterface.send("group_createFail", (errorCode,))
return
ProtocolTreeNode.require(groupNode,"group")
group_id = groupNode.getAttributeValue("id")
self.signalInterface.send("group_createSuccess", (group_id + "@g.us",))
def parseGroupEnded(self,node):
#jid = node.getAttributeValue("from");
leaveNode = node.getChild(0)
groupNode = leaveNode.getChild(0)
jid = groupNode.getAttributeValue("id")
self.signalInterface.send("group_endSuccess", (jid,))
def parseGroupSubject(self,node):
jid = node.getAttributeValue("from");
self.signalInterface.send("group_setSubjectSuccess", (jid,))
def parseParticipants(self,node):
jid = node.getAttributeValue("from");
children = node.getAllChildren("participant");
jids = []
for c in children:
jids.append(c.getAttributeValue("jid"))
self.signalInterface.send("group_gotParticipants", (jid, jids))
#@@TODO PICTURE STUFF
def createTmpFile(self, data, mode = "w"):
tmp = tempfile.mkstemp()[1]
tmpfile = open(tmp, mode)
tmpfile.write(data)
tmpfile.close()
return tmp
def parseGetPicture(self,node):
jid = node.getAttributeValue("from");
if "error code" in node.toString():
return;
pictureNode = node.getChild("picture")
if pictureNode is not None and pictureNode.data is not None:
tmp = self.createTmpFile(pictureNode.data if sys.version_info < (3, 0) else pictureNode.data.encode('latin-1'), "wb")
pictureId = int(pictureNode.getAttributeValue('id'))
try:
jid.index('-')
self.signalInterface.send("group_gotPicture", (jid, pictureId, tmp))
except ValueError:
self.signalInterface.send("contact_gotProfilePicture", (jid, pictureId, tmp))
def parseGetPictureIds(self,node):
jid = node.getAttributeValue("from");
groupNode = node.getChild("list")
#self._d(groupNode.toString())
children = groupNode.getAllChildren("user");
#pids = []
for c in children:
if c.getAttributeValue("id") is not None:
#pids.append({"jid":c.getAttributeValue("jid"),"id":c.getAttributeValue("id")})
self.signalInterface.send("contact_gotProfilePictureId", (c.getAttributeValue("jid"), c.getAttributeValue("id")))
#self.signalInterface.send("contact_gotProfilePictureIds", (pids,))
	def parseSetPicture(self, node):
		"""Handle the ack of a set-picture iq and emit the matching
		group_/profile_ success or error signal."""
		jid = node.getAttributeValue("from");
		picNode = node.getChild("picture")
		try:
			# Group jids contain '-'; index() raising ValueError routes plain
			# contacts to the profile_* branch below.
			# NOTE(review): the int() calls are also inside this try, so a
			# malformed picture id would fall through to the profile branch
			# too — confirm that is intended.
			jid.index('-')
			if picNode is None:
				self.signalInterface.send("group_setPictureError", (jid,0)) #@@TODO SEND correct error code
			else:
				pictureId = int(picNode.getAttributeValue("id"))
				self.signalInterface.send("group_setPictureSuccess", (jid, pictureId))
		except ValueError:
			if picNode is None:
				self.signalInterface.send("profile_setPictureError", (0,)) #@@TODO SEND correct error code
			else:
				pictureId = int(picNode.getAttributeValue("id"))
				self.signalInterface.send("profile_setPictureSuccess", (pictureId,))
def parseRequestUpload(self, iqNode, _hash):
mediaNode = iqNode.getChild("media")
if mediaNode:
url = mediaNode.getAttributeValue("url")
resumeFrom = mediaNode.getAttributeValue("resume")
if not resumeFrom:
resumeFrom = 0
if url:
self.signalInterface.send("media_uploadRequestSuccess", (_hash, url, resumeFrom))
else:
self.signalInterface.send("media_uploadRequestFailed", (_hash,))
else:
duplicateNode = iqNode.getChild("duplicate")
if duplicateNode:
url = duplicateNode.getAttributeValue("url")
self.signalInterface.send("media_uploadRequestDuplicate", (_hash, url))
else:
self.signalInterface.send("media_uploadRequestFailed", (_hash,))
def parseMessage(self,messageNode):
bodyNode = messageNode.getChild("body");
# offlineNode = messageNode.getChild("offline")
newSubject = "" if bodyNode is None else bodyNode.data;
msgData = None
# timestamp =long(time.time()*1000) if not offlineNode else int(messageNode.getAttributeValue("t"))*1000;
timestamp =int(messageNode.getAttributeValue("t"))
isGroup = False
isBroadcast = False
if newSubject.find("New version of WhatsApp Messenger is now available")>-1:
self._d("Rejecting whatsapp server message")
return #REJECT THIS FUCKING MESSAGE!
fromAttribute = messageNode.getAttributeValue("from");
try:
fromAttribute.index('-')
isGroup = True
except:
pass
author = messageNode.getAttributeValue("author");
#@@TODO reactivate blocked contacts check from client
'''if fromAttribute is not None and fromAttribute in self.eventHandler.blockedContacts:
self._d("CONTACT BLOCKED!")
return
if author is not None and author in self.eventHandler.blockedContacts:
self._d("CONTACT BLOCKED!")
return
'''
pushName = None
notifNode = messageNode.getChild("notify")
if notifNode is not None:
pushName = notifNode.getAttributeValue("name");
#pushName = pushName.decode("utf8")
msgId = messageNode.getAttributeValue("id");
attribute_t = messageNode.getAttributeValue("t");
typeAttribute = messageNode.getAttributeValue("type");
if typeAttribute == "error":
errorCode = 0;
errorNodes = messageNode.getAllChildren("error");
for errorNode in errorNodes:
codeString = errorNode.getAttributeValue("code")
try:
errorCode = int(codeString);
except ValueError:
'''catch value error'''
self.signalInterface.send("message_error", (msgId, fromAttribute, errorCode))
elif typeAttribute == "notification":
receiptRequested = False;
pictureUpdated = None
pictureUpdated = messageNode.getChild("notification").getAttributeValue("type");
wr = None
wr = messageNode.getChild("request").getAttributeValue("xmlns");
if wr == "urn:xmpp:receipts":
receiptRequested = True
if pictureUpdated == "picture":
notifNode = messageNode.getChild("notification");
#bodyNode = messageNode.getChild("notification").getChild("set") or messageNode.getChild("notification").getChild("delete")
bodyNode = notifNode.getChild("set")
if bodyNode:
pictureId = int(bodyNode.getAttributeValue("id"))
if isGroup:
self.signalInterface.send("notification_groupPictureUpdated",(bodyNode.getAttributeValue("jid"), bodyNode.getAttributeValue("author"), timestamp, msgId, pictureId, receiptRequested))
else:
self.signalInterface.send("notification_contactProfilePictureUpdated",(bodyNode.getAttributeValue("jid"), timestamp, msgId, pictureId, receiptRequested))
else:
bodyNode = notifNode.getChild("delete")
if bodyNode:
if isGroup:
self.signalInterface.send("notification_groupPictureRemoved",(bodyNode.getAttributeValue("jid"), bodyNode.getAttributeValue("author"), timestamp, msgId, receiptRequested))
else:
self.signalInterface.send("notification_contactProfilePictureRemoved",(bodyNode.getAttributeValue("jid"), timestamp, msgId, receiptRequested))
#if isGroup:
#
# self.signalInterface.send("notification_groupPictureUpdated",(bodyNode.getAttributeValue("jid"), bodyNode.getAttributeValue("author"), timestamp, msgId, receiptRequested))
#else:
# self.signalInterface.send("notification_contactProfilePictureUpdated",(bodyNode.getAttributeValue("jid"), timestamp, msgId, receiptRequested))
else:
addSubject = None
removeSubject = None
author = None
bodyNode = messageNode.getChild("notification").getChild("add");
if bodyNode is not None:
addSubject = bodyNode.getAttributeValue("jid");
author = bodyNode.getAttributeValue("author") or addSubject
bodyNode = messageNode.getChild("notification").getChild("remove");
if bodyNode is not None:
removeSubject = bodyNode.getAttributeValue("jid");
author = bodyNode.getAttributeValue("author") or removeSubject
if addSubject is not None:
self.signalInterface.send("notification_groupParticipantAdded", (fromAttribute, addSubject, author, timestamp, msgId, receiptRequested))
if removeSubject is not None:
self.signalInterface.send("notification_groupParticipantRemoved", (fromAttribute, removeSubject, author, timestamp, msgId, receiptRequested))
elif typeAttribute == "subject":
receiptRequested = False;
requestNodes = messageNode.getAllChildren("request");
for requestNode in requestNodes:
if requestNode.getAttributeValue("xmlns") == "urn:xmpp:receipts":
receiptRequested = True;
bodyNode = messageNode.getChild("body");
newSubject = None if bodyNode is None else (bodyNode.data if sys.version_info < (3, 0) else bodyNode.data.encode('latin-1').decode());
if newSubject is not None:
self.signalInterface.send("group_subjectReceived",(msgId, fromAttribute, author, newSubject, int(attribute_t), receiptRequested))
elif typeAttribute == "chat":
wantsReceipt = False;
messageChildren = [] if messageNode.children is None else messageNode.children
for childNode in messageChildren:
if ProtocolTreeNode.tagEquals(childNode,"request"):
wantsReceipt = True;
if ProtocolTreeNode.tagEquals(childNode,"broadcast"):
isBroadcast = True
elif ProtocolTreeNode.tagEquals(childNode,"composing"):
self.signalInterface.send("contact_typing", (fromAttribute,))
elif ProtocolTreeNode.tagEquals(childNode,"paused"):
self.signalInterface.send("contact_paused",(fromAttribute,))
elif ProtocolTreeNode.tagEquals(childNode,"media") and msgId is not None:
self._d("MULTIMEDIA MESSAGE!");
mediaUrl = messageNode.getChild("media").getAttributeValue("url");
mediaType = messageNode.getChild("media").getAttributeValue("type")
mediaSize = messageNode.getChild("media").getAttributeValue("size")
encoding = messageNode.getChild("media").getAttributeValue("encoding")
mediaPreview = None
if mediaType == "image":
mediaPreview = messageNode.getChild("media").data
if encoding == "raw" and mediaPreview:
mediaPreview = base64.b64encode(mediaPreview) if sys.version_info < (3, 0) else base64.b64encode(mediaPreview.encode('latin-1')).decode()
if isGroup:
self.signalInterface.send("group_imageReceived", (msgId, fromAttribute, author, mediaPreview, mediaUrl, mediaSize, wantsReceipt))
else:
self.signalInterface.send("image_received", (msgId, fromAttribute, mediaPreview, mediaUrl, mediaSize, wantsReceipt, isBroadcast))
elif mediaType == "video":
mediaPreview = messageNode.getChild("media").data
if encoding == "raw" and mediaPreview:
mediaPreview = base64.b64encode(mediaPreview) if sys.version_info < (3, 0) else base64.b64encode(mediaPreview.encode('latin-1')).decode()
if isGroup:
self.signalInterface.send("group_videoReceived", (msgId, fromAttribute, author, mediaPreview, mediaUrl, mediaSize, wantsReceipt))
else:
self.signalInterface.send("video_received", (msgId, fromAttribute, mediaPreview, mediaUrl, mediaSize, wantsReceipt, isBroadcast))
elif mediaType == "audio":
mediaPreview = messageNode.getChild("media").data
if isGroup:
self.signalInterface.send("group_audioReceived", (msgId, fromAttribute, author, mediaUrl, mediaSize, wantsReceipt))
else:
self.signalInterface.send("audio_received", (msgId, fromAttribute, mediaUrl, mediaSize, wantsReceipt, isBroadcast))
elif mediaType == "location":
mlatitude = messageNode.getChild("media").getAttributeValue("latitude")
mlongitude = messageNode.getChild("media").getAttributeValue("longitude")
name = messageNode.getChild("media").getAttributeValue("name")
if name and not sys.version_info < (3, 0):
name = name.encode('latin-1').decode()
mediaPreview = messageNode.getChild("media").data
if encoding == "raw" and mediaPreview:
mediaPreview = base64.b64encode(mediaPreview) if sys.version_info < (3, 0) else base64.b64encode(mediaPreview.encode('latin-1')).decode()
if isGroup:
self.signalInterface.send("group_locationReceived", (msgId, fromAttribute, author, name or "", mediaPreview, mlatitude, mlongitude, wantsReceipt))
else:
self.signalInterface.send("location_received", (msgId, fromAttribute, name or "", mediaPreview, mlatitude, mlongitude, wantsReceipt, isBroadcast))
elif mediaType =="vcard":
#return
#mediaItem.preview = messageNode.getChild("media").data
vcardData = messageNode.getChild("media").getChild("vcard").toString()
vcardName = messageNode.getChild("media").getChild("vcard").getAttributeValue("name")
if vcardName and not sys.version_info < (3, 0):
vcardName = vcardName.encode('latin-1').decode()
if vcardData is not None:
n = vcardData.find(">") +1
vcardData = vcardData[n:]
vcardData = vcardData.replace("</vcard>","")
if isGroup:
self.signalInterface.send("group_vcardReceived", (msgId, fromAttribute, author, vcardName, vcardData, wantsReceipt))
else:
self.signalInterface.send("vcard_received", (msgId, fromAttribute, vcardName, vcardData, wantsReceipt, isBroadcast))
else:
self._d("Unknown media type")
return
elif ProtocolTreeNode.tagEquals(childNode,"body") and msgId is not None:
msgData = childNode.data;
#fmsg.setData({"status":0,"key":key.toString(),"content":msgdata,"type":WAXMPP.message_store.store.Message.TYPE_RECEIVED});
elif ProtocolTreeNode.tagEquals(childNode,"received") and fromAttribute is not None and msgId is not None:
if fromAttribute == "s.us":
self.signalInterface.send("profile_setStatusSuccess", ("s.us", msgId,))
return;
#@@TODO autosend ack from client
#print "NEW MESSAGE RECEIVED NOTIFICATION!!!"
#self.connection.sendDeliveredReceiptAck(fromAttribute,msg_id);
self.signalInterface.send("receipt_messageDelivered", (fromAttribute, msgId))
return
elif not (ProtocolTreeNode.tagEquals(childNode,"active")):
if ProtocolTreeNode.tagEquals(childNode,"request"):
wantsReceipt = True;
elif ProtocolTreeNode.tagEquals(childNode,"notify"):
notify_name = childNode.getAttributeValue("name");
elif ProtocolTreeNode.tagEquals(childNode,"delay"):
xmlns = childNode.getAttributeValue("xmlns");
if "urn:xmpp:delay" == xmlns:
stamp_str = childNode.getAttributeValue("stamp");
if stamp_str is not None:
stamp = stamp_str
timestamp = self.parseOfflineMessageStamp(stamp)*1000;
elif ProtocolTreeNode.tagEquals(childNode,"x"):
xmlns = childNode.getAttributeValue("xmlns");
if "jabber:x:event" == xmlns and msgId is not None:
if fromAttribute == "broadcast":
self.signalInterface.send("receipt_broadcastSent", (msgId,))
else:
self.signalInterface.send("receipt_messageSent", (fromAttribute, msgId))
elif "jabber:x:delay" == xmlns:
continue; #@@TODO FORCED CONTINUE, WHAT SHOULD I DO HERE? #wtf?
stamp_str = childNode.getAttributeValue("stamp");
if stamp_str is not None:
stamp = stamp_str
timestamp = stamp;
else:
if ProtocolTreeNode.tagEquals(childNode,"delay") or not ProtocolTreeNode.tagEquals(childNode,"received") or msgId is None:
continue;
receipt_type = childNode.getAttributeValue("type");
if receipt_type is None or receipt_type == "delivered":
self.signalInterface.send("receipt_messageDelivered", (fromAttribute, msgId))
elif receipt_type == "visible":
self.signalInterface.send("receipt_visible", (fromAttribute, msgId))
if msgData:
msgData = msgData if sys.version_info < (3, 0) else msgData.encode('latin-1').decode()
if isGroup:
self.signalInterface.send("group_messageReceived", (msgId, fromAttribute, author, msgData, timestamp, wantsReceipt, pushName))
else:
self.signalInterface.send("message_received", (msgId, fromAttribute, msgData, timestamp, wantsReceipt, pushName, isBroadcast))
##@@TODO FROM CLIENT
'''if conversation.type == "group":
if conversation.subject is None:
signal = False
self._d("GETTING GROUP INFO")
self.connection.sendGetGroupInfo(fromAttribute)
'''
#if not len(conversation.getContacts()):
# self._d("GETTING GROUP CONTACTS")
# self.connection.sendGetParticipants(fromAttribute)
'''@@TODO FROM CLIENT
if ret is None:
conversation.incrementNew();
WAXMPP.message_store.pushMessage(fromAttribute,fmsg)
fmsg.key = key
else:
fmsg.key = eval(ret.key)
duplicate = True;
'''
| gpl-3.0 |
Stratoscale/rackattack-virtual | systemtests/uservirtualrackattack.py | 2 | 1554 | import subprocess
import os
import shutil
import time
from rackattack import clientfactory
from tests import testlib
import rackattack
class UserVirtualRackAttack:
    """Runs a local virtual RackAttack provider for system tests.

    Starts the server as a sudo subprocess in __init__, tears it down in
    done(), and hands out clients via createClient().
    """
    MAXIMUM_VMS = 4

    def __init__(self):
        # Refuse to run against a system-installed rackattack package; the
        # tests must exercise the checked-out source tree.
        assert '/usr' not in rackattack.__file__
        self._requestPort = 3443
        self._subscribePort = 3444
        image_dir = os.path.join(os.getcwd(), "images.fortests")
        # Start from a clean image directory every run.
        shutil.rmtree(image_dir, ignore_errors=True)
        server_command = [
            "sudo", "PYTHONPATH=.", "UPSETO_JOIN_PYTHON_NAMESPACES=Yes",
            "python", "rackattack/virtual/main.py",
            "--requestPort=%d" % self._requestPort,
            "--subscribePort=%d" % self._subscribePort,
            "--diskImagesDirectory=" + image_dir,
            "--serialLogsDirectory=" + image_dir,
            "--maximumVMs=%d" % self.MAXIMUM_VMS]
        self._popen = subprocess.Popen(
            server_command, close_fds=True, stderr=subprocess.STDOUT)
        testlib.waitForTCPServer(('localhost', self._requestPort))
        time.sleep(0.5)  # dnsmasq needs to be able to receive a SIGHUP

    def done(self):
        # Fail loudly if the server died on its own before we asked it to.
        if self._popen.poll() is not None:
            raise Exception("Virtual RackAttack server terminated before it's time")
        subprocess.check_call(["sudo", "kill", str(self._popen.pid)], close_fds=True)
        self._popen.wait()

    def createClient(self):
        # The client factory reads the provider endpoints from the environment.
        os.environ['RACKATTACK_PROVIDER'] = 'tcp://localhost:%d@tcp://localhost:%d' % (
            self._requestPort, self._subscribePort)
        return clientfactory.factory()
| apache-2.0 |
Alecardv/College-projects | MiniPascal/mpastype.py | 1 | 1329 | # -*- coding: utf-8 -*-
# @Author: J. Alejandro Cardona Valdes
"""
Sistema de Tipos de MPascal
===========================
Este archivo define las clases de representacion de tipos. Esta es una
clase general usada para representar todos los tipos. Cada tipo es entonces
una instancia singleton de la clase tipo.
class MpasType(object):
pass
int_type = MpasType("int",...)
float_type = MpasType("float",...)
"""
class MpasType(object):
    """Represents a type in the MPascal language.

    Types are declared as singleton instances of this class (e.g. int_type,
    float_type below).

    Attributes:
        name: the type's name, e.g. "int".
        default: the default value for variables of this type.
        bin_ops: set of binary operator names valid for this type.
        un_ops: set of unary operator names valid for this type.
    """
    def __init__(self, name, default, bin_ops=None, un_ops=None):
        # None-sentinel instead of mutable default arguments: the original
        # `bin_ops=set(), un_ops=set()` defaults were evaluated once and
        # shared across every instance constructed without explicit sets.
        self.name = name
        self.bin_ops = set() if bin_ops is None else bin_ops
        self.un_ops = set() if un_ops is None else un_ops
        self.default = default

    def __str__(self):
        # Canonical display form, e.g. "INT_TYPE".
        return str(self.name + '_type').upper()

    def __repr__(self):
        # Same canonical form as __str__; delegate instead of duplicating.
        return self.__str__()
# Create the specific singleton type instances.
int_type = MpasType("int", 0,
    {'MAS', 'MENOS', 'MULT', 'DIV', 'LE', 'LT', 'EQ', 'NE', 'GT', 'GE'},
    {'MAS', 'MENOS'})
float_type = MpasType("float", 0.0,
    {'MAS', 'MENOS', 'MULT', 'DIV', 'LE', 'LT', 'EQ', 'NE', 'GT', 'GE'},
    {'MAS', 'MENOS'})
# Strings support no operators.  Use empty sets here: the original passed
# empty dicts ({}), which behaved like empty sets for membership tests but
# gave str_type.bin_ops/un_ops the wrong type.
str_type = MpasType("string", '', set(), set())
with-git/tensorflow | tensorflow/contrib/keras/python/keras/utils/__init__.py | 19 | 2446 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.contrib.keras.python.keras.utils import data_utils
from tensorflow.contrib.keras.python.keras.utils import generic_utils
from tensorflow.contrib.keras.python.keras.utils import io_utils
from tensorflow.contrib.keras.python.keras.utils import np_utils
from tensorflow.contrib.keras.python.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
from tensorflow.contrib.keras.python.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.contrib.keras.python.keras.utils.data_utils import Sequence
from tensorflow.contrib.keras.python.keras.utils.generic_utils import custom_object_scope
from tensorflow.contrib.keras.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import get_custom_objects
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.contrib.keras.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.io_utils import HDF5Matrix
from tensorflow.contrib.keras.python.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.contrib.keras.python.keras.utils.np_utils import normalize
from tensorflow.contrib.keras.python.keras.utils.np_utils import to_categorical
from tensorflow.contrib.keras.python.keras.utils.vis_utils import plot_model
# Globally-importable utils.
| apache-2.0 |
CAAD-RWTH/ClockworkForDynamo | nodes/2.x/python/FamilyType.CompoundStructureLayers.py | 4 | 1439 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
items = UnwrapElement(IN[0])
def GetCompoundStructureLayers(item):
    """Collect per-layer data from a family type's compound structure.

    Returns eight parallel lists: layer objects, materials, layer functions,
    widths, is-core flags, wrapping flags, variable-layer flags and
    structural-deck flags.  Items without a compound structure (or any
    Revit API failure) yield whatever was gathered so far — best effort.
    """
    layer_objs = []
    materials = []
    functions = []
    widths = []
    core_flags = []
    wrap_flags = []
    variable_flags = []
    deck_flags = []
    try:
        if hasattr(item, "GetCompoundStructure"):
            struct = item.GetCompoundStructure()
            # Accessed as in the original implementation (value unused).
            vertcomp = struct.IsVerticallyCompound
            var_index = struct.VariableLayerIndex
            for i in range(struct.LayerCount):
                layer_objs.append(struct.GetLayers()[i])
                materials.append(item.Document.GetElement(struct.GetMaterialId(i)))
                functions.append(struct.GetLayerFunction(i))
                widths.append(struct.GetLayerWidth(i))
                core_flags.append(struct.IsCoreLayer(i))
                # Core layers never participate in wrapping.
                if struct.IsCoreLayer(i):
                    wrap_flags.append(False)
                else:
                    wrap_flags.append(struct.ParticipatesInWrapping(i))
                variable_flags.append(var_index == i)
                deck_flags.append(struct.IsStructuralDeck(i))
    except:
        # Deliberate best-effort: partial results are returned on API errors.
        pass
    return layer_objs, materials, functions, widths, core_flags, wrap_flags, variable_flags, deck_flags
# Dynamo entry point: IN[0] may be a single family type or a list of them.
if isinstance(IN[0], list):
    # One call per element, then transpose the per-item result tuples into
    # parallel output lists.
    OUT = map(list, zip(*[GetCompoundStructureLayers(x) for x in items]))
else:
    OUT = GetCompoundStructureLayers(items)
Blothorn/FTD | ftd-lua-validator/validate.py | 1 | 1175 | from google.appengine.ext import ndb
import logging
import re
from models import Script
STRING_DATA_PATTERN = re.compile(r'"BlockStringData":\["(.*?[^\\])"')
def validate(blueprint):
    """Validate every Lua block embedded in *blueprint* against canonical Scripts.

    Each "BlockStringData" payload that contains 'update' is treated as a Lua
    script: its id line is looked up in the Script datastore and the canonical
    body must appear verbatim as the tail of the block.

    Returns True only when all such blocks validate; False otherwise.
    """
    for data in re.findall(STRING_DATA_PATTERN, blueprint):
        # Skip payloads that are not Lua blocks (or do nothing).
        # FIX: the original used `if not data.find('update')`, which is only
        # true when 'update' sits at index 0 — the comment's stated intent is
        # to skip blocks that do not contain 'update' at all.
        if 'update' not in data:
            continue
        # The payload carries JSON-escaped line breaks, so split on the
        # literal backslash sequence, not on real newlines.
        lines = data.split('\\r\\n')
        # The id line is the first line, or the second when the block opens
        # with a Lua long-comment marker.
        idLine = lines[1] if lines[0] == '--[[' else lines[0]
        canonical = Script.query(Script.idLine == idLine).get()
        if canonical is None:
            logging.info('No matching script.')
            return False
        canonicalLines = re.split('\r?\n', canonical.body)
        try:
            start = lines.index(canonicalLines[0])
            for i, canonicalLine in enumerate(canonicalLines):
                line = lines[start + i]
                if line != canonicalLine:
                    logging.info('"{}" does not match "{}".'
                                 .format(line, canonicalLine))
                    return False
            # The canonical body must be the exact tail of the block (no
            # extra trailing lines).
            if lines[start:] != canonicalLines:
                return False
            # FIX: the original returned here, validating only the first
            # Lua block; keep checking the remaining blocks instead.
        except ValueError:
            logging.info('Body not found (looking for "{}").'
                         .format(canonicalLines[0]))
            return False
        except IndexError:
            # The block ended before the canonical script did (original code
            # crashed on this case).
            return False
    return True
| mit |
SvenLauterbach/azure-quickstart-templates | hortonworks-on-centos/scripts/vm-bootstrap.py | 89 | 53170 | #
# vm-bootstrap.py
#
# This script is used to prepare VMs launched via HDP Cluster Install Blade on Azure.
#
# Parameters passed from the bootstrap script invocation by the controller (shown in the parameter order).
# Required parameters:
# action: "bootstrap" to set up VM and initiate cluster deployment. "check" for checking on cluster deployment status.
# cluster_id: user-specified name of the cluster
# admin_password: password for the Ambari "admin" user
# Required parameters for "bootstrap" action:
# scenario_id: "evaluation" or "standard"
# num_masters: number of masters in the cluster
# num_workers: number of workers in the cluster
# master_prefix: hostname prefix for master hosts (master hosts are named <cluster_id>-<master_prefix>-<id>
# worker_prefix: hostname prefix for worker hosts (worker hosts are named <cluster_id>-<worker_prefix>-<id>
# domain_name: the domain name part of the hosts, starting with a period (e.g., .cloudapp.net)
# id_padding: number of digits for the host <id> (e.g., 2 uses <id> like 01, 02, .., 10, 11)
# masters_iplist: list of masters' local IPV4 addresses sorted from master_01 to master_XX delimited by a ','
# workers_iplist: list of workers' local IPV4 addresses sorted from worker_01 to worker_XX delimited by a ','
# Required parameters for "check" action:
# --check_timeout_seconds:
# the number of seconds after which the script is required to exit
# --report_timeout_fail:
# if "true", exit code 1 is returned in case deployment has failed, or deployment has not finished after
# check_timeout_seconds
# if "false", exit code 0 is returned if deployment has finished successfully, or deployment has not finished after
# check_timeout_seconds
# Optional:
# protocol: if "https" (default), https:8443 is used for Ambari. Otherwise, Ambari uses http:8080
from optparse import OptionParser
import base64
import json
import logging
import os
import pprint
import re
import socket
import sys
import time
import urllib2
# Module-level logger: everything this script does is recorded at INFO level
# in /tmp/vm-bootstrap.log with timestamped entries.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/tmp/vm-bootstrap.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting VM Bootstrap...')
# Command-line contract (see module header): "bootstrap" provisions the VM and
# initiates cluster deployment; "check" polls deployment status.
parser = OptionParser()
parser.add_option("--cluster_id", type="string", dest="cluster_id")
parser.add_option("--scenario_id", type="string", dest="scenario_id", default="evaluation")
parser.add_option("--num_masters", type="int", dest="num_masters")
parser.add_option("--num_workers", type="int", dest="num_workers")
parser.add_option("--master_prefix", type="string", dest="master_prefix")
parser.add_option("--worker_prefix", type="string", dest="worker_prefix")
parser.add_option("--domain_name", type="string", dest="domain_name")
parser.add_option("--id_padding", type="int", dest="id_padding", default=2)
parser.add_option("--admin_password", type="string", dest="admin_password", default="admin")
parser.add_option("--masters_iplist", type="string", dest="masters_iplist")
parser.add_option("--workers_iplist", type="string", dest="workers_iplist")
parser.add_option("--protocol", type="string", dest="protocol", default="https")
parser.add_option("--action", type="string", dest="action", default="bootstrap")
# FIX: the default was the string "250"; optparse does not coerce defaults to
# the declared type, so check_timeout_seconds would have been a str whenever
# the flag was omitted.  Use a real int.
parser.add_option("--check_timeout_seconds", type="int", dest="check_timeout_seconds", default=250)
parser.add_option("--report_timeout_fail", type="string", dest="report_timeout_fail", default="false")
(options, args) = parser.parse_args()

# Unpack options into the module-level globals used throughout this script.
cluster_id = options.cluster_id
scenario_id = options.scenario_id.lower()
num_masters = options.num_masters
num_workers = options.num_workers
master_prefix = options.master_prefix
worker_prefix = options.worker_prefix
domain_name = options.domain_name
id_padding = options.id_padding
admin_password = options.admin_password
masters_iplist = options.masters_iplist
workers_iplist = options.workers_iplist
protocol = options.protocol
action = options.action
check_timeout_seconds = options.check_timeout_seconds
report_timeout_fail = options.report_timeout_fail.lower() == "true"
logger.info('action=' + action)

# Ambari ships with admin/admin credentials; HTTPS uses port 8443, plain
# HTTP uses 8080.
admin_username = 'admin'
current_admin_password = 'admin'
request_timeout = 30
port = '8443' if (protocol == 'https') else '8080'

# Install a verbose urllib2 opener so every HTTP request is traced.
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
class TimeoutException(Exception):
    """Raised when a polled operation does not finish within its timeout."""
    pass
def get_ambari_auth_string():
    """Build the HTTP Basic-Auth header value for the Ambari admin user."""
    credentials = '%s:%s' % (admin_username, current_admin_password)
    encoded = base64.encodestring(credentials).replace('\n', '')
    return 'Basic ' + encoded
def run_system_command(command):
    # Thin wrapper over os.system: runs *command* through the shell.
    # NOTE(review): os.system performs no quoting/escaping — callers in this
    # script pass locally-built trusted strings (including multi-line shell
    # heredocs); never route user-controlled input through here.
    run_system_command_result = os.system(command) if False else os.system(command)  # noqa
def get_hostname(id):
    """Map a 1-based host index to its short hostname.

    Indices 1..num_masters are masters; the remainder are workers, numbered
    from 1 with the id zero-padded to id_padding digits.
    """
    is_master = id <= num_masters
    prefix = master_prefix if is_master else worker_prefix
    ordinal = id if is_master else id - num_masters
    return prefix + str(ordinal).zfill(id_padding)
def get_fqdn(id):
    # Fully-qualified name: short hostname plus the configured domain suffix
    # (domain_name already starts with a period, e.g. ".cloudapp.net").
    return get_hostname(id) + domain_name
def get_host_ip(hostname):
    """Look up a host's IP from the master/worker IP lists by hostname suffix.

    The trailing "-NN" of the hostname is the 1-based index into the
    corresponding IP list.
    """
    iplist = masters_iplist if hostname.startswith(master_prefix) else workers_iplist
    # FIX/robustness: at this point in the script masters_iplist/workers_iplist
    # hold the raw comma-delimited option strings; indexing a string with an
    # int returns a single character.  Tolerate both the raw string and an
    # already-split list (in case the globals are reassigned elsewhere).
    if isinstance(iplist, str):
        iplist = iplist.split(',')
    index = int(hostname.split('-')[-1]) - 1
    return iplist[index]
def get_host_ip_map(hostnames):
    """Resolve each hostname to its IP address, retrying briefly on failure.

    Args:
        hostnames: iterable of short hostnames (master/worker prefix + id).
    Returns:
        dict mapping hostname -> IP address string.
    Raises:
        LookupError: when a hostname cannot be resolved after 5 attempts.
    """
    host_ip_map = {}
    for hostname in hostnames:
        ip = None
        num_tries = 0
        while ip is None and num_tries < 5:
            try:
                ip = get_host_ip(hostname)
            except Exception:  # narrowed from a bare except
                time.sleep(1)
                num_tries = num_tries + 1
        if ip is None:
            logger.info('Failed to look up ip address for ' + hostname)
            # FIX: the original used a bare `raise` with no active exception,
            # which itself blows up with an unrelated error; raise explicitly.
            raise LookupError('Failed to look up ip address for ' + hostname)
        logger.info(hostname + ' resolved to ' + ip)
        host_ip_map[hostname] = ip
    return host_ip_map
def update_etc_hosts(host_ip_map):
    """Append one "<ip>\\t<fqdn>\\t<short name>" line per host to /etc/hosts."""
    logger.info('Adding entries to /etc/hosts file...')
    with open("/etc/hosts", "a") as hosts_file:
        for hostname in sorted(host_ip_map):
            entry = '%s\t%s\t%s\n' % (host_ip_map[hostname], hostname + domain_name, hostname)
            hosts_file.write(entry)
    logger.info('Finished updating /etc/hosts')
def update_ambari_agent_ini(ambari_server_hostname):
    # Point the local ambari-agent at the Ambari server by rewriting the
    # hostname entry in its config file via in-place sed.
    logger.info('Updating ambari-agent.ini file...')
    command = 'sed -i s/hostname=localhost/hostname=%s/ /etc/ambari-agent/conf/ambari-agent.ini' % ambari_server_hostname
    logger.info('Executing command: ' + command)
    run_system_command(command)
    logger.info('Finished updating ambari-agent.ini file')
def patch_ambari_agent():
    # Placeholder: logs intent only — no actual patching is performed here.
    # NOTE(review): the second message says 'ambari-server' although this
    # function nominally targets the agent; looks like a copy/paste slip.
    logger.info('Patching ambari-agent to prevent rpmdb corruption...')
    logger.info('Finished patching ambari-server')
def enable_https():
    # Switch Ambari's API to HTTPS on port 8443 with a self-signed certificate:
    #  - append the SSL settings to ambari.properties,
    #  - generate a key/cert pair, encrypt the key, and bundle both into
    #    /var/lib/ambari-server/keys/https.keystore.p12,
    #  - keep the keystore password next to the keys, then clean up.
    # The heredoc below is executed verbatim through the shell (os.system);
    # do not edit it casually.
    command = """
printf 'api.ssl=true\nclient.api.ssl.cert_name=https.crt\nclient.api.ssl.key_name=https.key\nclient.api.ssl.port=8443' >> /etc/ambari-server/conf/ambari.properties
mkdir /root/ambari-cert
cd /root/ambari-cert
# create server.crt and server.key (self-signed)
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr -batch
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
echo PulUuMWPp0o4Lq6flGA0NGDKNRZQGffW2mWmJI3klSyspS7mUl > pass.txt
cp pass.txt passin.txt
# encrypts server.key with des3 as server.key.secured with the specified password
openssl rsa -in server.key -des3 -out server.key.secured -passout file:pass.txt
# creates /tmp/https.keystore.p12
openssl pkcs12 -export -in 'server.crt' -inkey 'server.key.secured' -certfile 'server.crt' -out '/var/lib/ambari-server/keys/https.keystore.p12' -password file:pass.txt -passin file:passin.txt
mv pass.txt /var/lib/ambari-server/keys/https.pass.txt
cd ..
rm -rf /root/ambari-cert
"""
    run_system_command(command)
def set_admin_password(new_password, timeout):
    # Stub: only logs intent.  new_password and timeout are currently unused —
    # presumably the real password change happens elsewhere in this file;
    # TODO confirm (this chunk does not show it).
    logger.info('Setting admin password...')
def poll_until_all_agents_registered(num_hosts, timeout):
    """Block until at least *num_hosts* Ambari agents have registered.

    Polls the local Ambari hosts endpoint every 5 seconds for up to *timeout*
    seconds.

    Raises:
        TimeoutException: if the agents do not all register in time.
            (The original raised a bare Exception even though
            TimeoutException is defined for exactly this; TimeoutException
            subclasses Exception, so existing handlers still match.)
    """
    url = '%s://localhost:%s/api/v1/hosts' % (protocol, port)
    logger.info('poll until all agents')
    all_hosts_registered = False
    start_time = time.time()
    while time.time() - start_time < timeout:
        request = urllib2.Request(url)
        request.add_header("Authorization", get_ambari_auth_string())
        try:
            result = urllib2.urlopen(request, timeout=request_timeout).read()
            pprint.pprint(result)
            if result is not None:
                json_result = json.loads(result)
                if len(json_result['items']) >= num_hosts:
                    all_hosts_registered = True
                    break
        except Exception:  # narrowed from a bare except
            logger.exception('Could not poll agent status from the server.')
        time.sleep(5)
    if not all_hosts_registered:
        raise TimeoutException('Timed out while waiting for all agents to register')
def is_ambari_server_host():
    # True when this VM's short hostname matches the Ambari server host.
    # NOTE(review): relies on the module-level ambari_server_hostname, which is
    # not assigned anywhere in this portion of the file — presumably set during
    # bootstrap before this is called; confirm against the rest of the script.
    hostname = socket.getfqdn()
    hostname = hostname.split('.')[0]  # strip the domain to get the short name
    logger.info(hostname)
    logger.info('Checking ambari host')
    logger.info(ambari_server_hostname)
    return hostname == ambari_server_hostname
def create_blueprint(scenario_id):
blueprint_name = 'myblueprint'
logger.info('Creating blueprint for scenario %s' % scenario_id)
url = '%s://localhost:%s/api/v1/blueprints/%s' % (protocol, port, blueprint_name)
evaluation_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "DRPC_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "3"
}
]
small_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "9"
}
]
medium_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "HIVE_SERVER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "99"
}
]
large_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "HIVE_METASTORE"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_5",
"components" : [
{
"name" : "NODEMANAGER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_6",
"components" : [
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_7",
"components" : [
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_8",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "200"
}
]
# Select the host-group layout for the requested deployment scenario.
if scenario_id == 'evaluation':
host_groups = evaluation_host_groups
elif scenario_id == 'small':
host_groups = small_host_groups
elif scenario_id == 'medium':
host_groups = medium_host_groups
elif scenario_id == 'large':
host_groups = large_host_groups
# NOTE(review): the line below unconditionally overwrites the selection made
# above, so the 'medium' and 'large' layouts can never take effect — this
# looks like leftover debug/testing code; confirm intent before relying on
# those scenarios.
host_groups = evaluation_host_groups if scenario_id == 'evaluation' else small_host_groups
evaluation_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "sandbox",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY"
}
},
{
"hdfs-site" : {
"dfs.block.size" : "34217472",
"dfs.replication" : "1",
"dfs.namenode.accesstime.precision" : "3600000",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
"dfs.nfs.exports.allowed.hosts" : "* rw",
"dfs.datanode.max.xcievers" : "1024",
"dfs.block.access.token.enable" : "false",
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hive-site" : {
"javax.jdo.option.ConnectionPassword" : "hive",
"hive.tez.container.size" : "250",
"hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
"hive.heapsize" : "250",
"hive.users.in.admin.role" : "hue,hive",
"hive_metastore_user_passwd" : "hive",
"hive.server2.enable.impersonation": "true",
"hive.compactor.check.interval": "300s",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.timeout": "86400s",
"hive.enforce.bucketing": "true",
"hive.support.concurrency": "true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.server2.enable.doAs": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.txn.max.open.batch": "1000",
"hive.txn.timeout": "300",
"hive.security.authorization.enabled": "false",
"hive.users.in.admin.role": "hue,hive",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_2%:9083"
}
},
{
"mapred-env": {
"jobhistory_heapsize" : "250"
}
},
{
"mapred-site" : {
"mapreduce.map.memory.mb" : "250",
"mapreduce.reduce.memory.mb" : "250",
"mapreduce.task.io.sort.mb" : "64",
"yarn.app.mapreduce.am.resource.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
"mapred.job.reduce.memory.mb" : "250",
"mapred.child.java.opts" : "-Xmx200m",
"mapred.job.map.memory.mb" : "250",
"io.sort.mb" : "64",
"mapreduce.map.java.opts" : "-Xmx200m",
"mapreduce.reduce.java.opts" : "-Xmx200m"
}
},
{
"oozie-site" : {
"oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
"oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
"oozie.service.JPAService.jdbc.password" : "oozie"
}
},
{
"storm-site" : {
"logviewer.port" : 8005,
"nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=sandbox.hortonworks.com,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
"ui.childopts" : "-Xmx220m",
"drpc.childopts" : "-Xmx220m"
}
},
{
"tez-site" : {
"tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
"tez.am.resource.memory.mb" : "250",
"tez.dag.am.resource.memory.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m"
}
},
{
"webhcat-site" : {
"webhcat.proxyuser.hue.hosts" : "*",
"webhcat.proxyuser.hue.groups" : "*",
"webhcat.proxyuser.hcat.hosts" : "*",
"webhcat.proxyuser.hcat.groups" : "*",
"templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://sandbox.hortonworks.com:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse"
}
},
{
"yarn-env": {
"apptimelineserver_heapsize" : "250",
"resourcemanager_heapsize" : "250",
"nodemanager_heapsize" : "250",
"yarn_heapsize" : "250"
}
},
{
"yarn-site" : {
"yarn.nodemanager.resource.memory-mb": "2250",
"yarn.nodemanager.vmem-pmem-ratio" : "10",
"yarn.scheduler.minimum-allocation-mb" : "250",
"yarn.scheduler.maximum-allocation-mb" : "2250",
"yarn.nodemanager.pmem-check-enabled" : "false",
"yarn.acl.enable" : "false",
"yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*"
}
}
]
standard_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "hdp",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"yarn-site": {
"yarn.nodemanager.local-dirs": "/disks/0/hadoop/yarn/local,/disks/1/hadoop/yarn/local,/disks/2/hadoop/yarn/local,/disks/3/hadoop/yarn/local,/disks/4/hadoop/yarn/local,/disks/5/hadoop/yarn/local,/disks/6/hadoop/yarn/local,/disks/7/hadoop/yarn/local",
"yarn.nodemanager.log-dirs": "/disks/0/hadoop/yarn/log,/disks/1/hadoop/yarn/log,/disks/2/hadoop/yarn/log,/disks/3/hadoop/yarn/log,/disks/4/hadoop/yarn/log,/disks/5/hadoop/yarn/log,/disks/6/hadoop/yarn/log,/disks/7/hadoop/yarn/log,/disks/8/hadoop/yarn/log,/disks/9/hadoop/yarn/log,/disks/10/hadoop/yarn/log,/disks/11/hadoop/yarn/log,/disks/12/hadoop/yarn/log,/disks/13/hadoop/yarn/log,/disks/14/hadoop/yarn/log,/disks/15/hadoop/yarn/log",
"yarn.timeline-service.leveldb-timeline-store.path": "/disks/0/hadoop/yarn/timeline",
"yarn.nodemanager.resource.memory-mb" : "32768",
"yarn.scheduler.maximum-allocation-mb" : "32768",
"yarn.scheduler.minimum-allocation-mb" : "2048"
}
},
{
"tez-site": {
"tez.am.resource.memory.mb" : "2048",
"tez.am.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC"
}
},
{
"mapred-site": {
"mapreduce.map.java.opts" : "-Xmx1638m",
"mapreduce.map.memory.mb" : "2048",
"mapreduce.reduce.java.opts" : "-Xmx1638m",
"mapreduce.reduce.memory.mb" : "2048",
"mapreduce.task.io.sort.mb" : "819",
"yarn.app.mapreduce.am.command-opts" : "-Xmx1638m",
"yarn.app.mapreduce.am.resource.mb" : "2048"
}
},
{
"hdfs-site": {
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY",
"hbase.tmp.dir": "/disks/0/hadoop/hbase"
}
},
{
"storm-site": {
"storm.local.dir": "/disks/0/hadoop/storm"
}
},
{
"falcon-startup.properties": {
"*.config.store.uri": "file:///disks/0/hadoop/falcon/store"
}
},
{
"hive-site": {
"hive.auto.convert.join.noconditionaltask.size" : "716177408",
"hive.tez.container.size" : "2048",
"hive.tez.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_3%:9083"
}
}
]
configurations = evaluation_configurations if scenario_id == 'evaluation' else standard_configurations
data = {
"configurations" : configurations,
"host_groups": host_groups,
"Blueprints" : {
"blueprint_name" : blueprint_name,
"stack_name" : "HDP",
"stack_version" : "2.2"
}
}
data = json.dumps(data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=request_timeout)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
return 'myblueprint'
def initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers):
    """Kick off an Ambari cluster deployment from a registered blueprint.

    Host groups master_1..master_N are mapped one-to-one onto the first
    ``num_masters`` hosts; the remaining ``num_workers`` hosts all join the
    shared 'workers' group.  Raises on clusters smaller than 4 nodes and
    re-raises any HTTP error from the Ambari REST API.
    """
    logger.info('Deploying cluster...')
    url = '%s://localhost:%s/api/v1/clusters/%s' % (protocol, port, cluster_id)

    if num_masters + num_workers < 4:
        raise Exception('Cluster size must be 4 or greater')

    # One dedicated host group per master host.
    host_groups = [{"name": "master_%d" % i,
                    "hosts": [{"fqdn": get_fqdn(i)}]}
                   for i in range(1, num_masters + 1)]
    # All worker hosts share a single group.
    host_groups.append({
        "name": "workers",
        "hosts": [{"fqdn": get_fqdn(i)}
                  for i in range(num_masters + 1, num_masters + num_workers + 1)],
    })

    payload = json.dumps({
        "blueprint": blueprint_name,
        "default_password": "admin",
        "host_groups": host_groups,
    })
    pprint.pprint('data=' + payload)

    request = urllib2.Request(url, payload)
    request.add_header('Authorization', get_ambari_auth_string())
    request.add_header('X-Requested-By', 'ambari')
    request.add_header('Content-Type', 'text/plain')
    try:
        response = urllib2.urlopen(request, timeout=120)
        pprint.pprint(response.read())
    except urllib2.HTTPError as e:
        logger.error('Cluster deployment failed: ' + e.read())
        raise e
def poll_until_cluster_deployed(cluster_id, timeout):
    """Poll Ambari request #1 until the blueprint deployment finishes.

    Polls every 5 seconds for up to ``timeout`` seconds.  Raises
    TimeoutException when the deployment does not finish in time, and a
    plain Exception when it finished unsuccessfully.
    """
    url = ('%s://localhost:%s/api/v1/clusters/%s/requests/1'
           '?fields=Requests/progress_percent,Requests/request_status'
           % (protocol, port, cluster_id))
    deploy_success = False
    deploy_finished = False
    start_time = time.time()
    logger.info('poll until function')
    while time.time() - start_time < timeout:
        request = urllib2.Request(url)
        request.add_header("Authorization", get_ambari_auth_string())
        try:
            result = urllib2.urlopen(request, timeout=request_timeout).read()
            pprint.pprint(result)
            if result is not None:
                json_result = json.loads(result)
                status = json_result['Requests']['request_status']
                if status == 'COMPLETED':
                    deploy_success = True
                # 100% progress or an explicit FAILED both terminate polling.
                if int(json_result['Requests']['progress_percent']) == 100 or status == 'FAILED':
                    deploy_finished = True
                    break
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit and made the poll loop impossible to interrupt; a
        # transient HTTP/JSON error is best-effort-retried on purpose.
        except Exception:
            logger.info('Could not poll deploy status from the server.')
        time.sleep(5)
    if not deploy_finished:
        raise TimeoutException('Timed out while waiting for cluster deployment to finish')
    elif not deploy_success:
        raise Exception('Cluster deploy failed')
# Top-level entry: 'bootstrap' provisions this VM (agent everywhere, plus
# server setup and blueprint submission on the designated Ambari host); any
# other action value falls through to polling an already-submitted
# deployment until it finishes or times out.
if action == 'bootstrap':
    masters_iplist = masters_iplist.split(',')
    workers_iplist = workers_iplist.split(',')
    ambari_server_hostname = get_hostname(1)
    # Host indices are 1-based: masters come first, then workers.
    all_hostnames = map((lambda i: get_hostname(i)), range(1, num_masters + num_workers + 1))
    logger.info(all_hostnames)
    host_ip_map = get_host_ip_map(all_hostnames)
    update_etc_hosts(host_ip_map)
    update_ambari_agent_ini(ambari_server_hostname)
    patch_ambari_agent()
    run_system_command('chkconfig ambari-agent on')
    logger.info('Starting ambari-agent...')
    run_system_command('ambari-agent start')
    logger.info('ambari-agent started')
    if is_ambari_server_host():
        # Only the first host runs the Ambari server and drives deployment.
        run_system_command('chkconfig ambari-server on')
        logger.info('Running ambari-server setup...')
        run_system_command('ambari-server setup -s -j /usr/jdk64/jdk1.7.0_45')
        logger.info('ambari-server setup finished')
        if protocol == 'https':
            logger.info('Enabling HTTPS...')
            enable_https()
            logger.info('HTTPS enabled')
        logger.info('Starting ambari-server...')
        run_system_command('ambari-server start')
        logger.info('ambari-server started')
        try:
            set_admin_password(admin_password, 60 * 2)
            # set current_admin_password so that HTTP requests to Ambari start using the new user-specified password
            current_admin_password = admin_password
            poll_until_all_agents_registered(num_masters + num_workers, 60 * 4)
            blueprint_name = create_blueprint(scenario_id)
            initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers)
        except:
            # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit
            # and discards the traceback — consider `except Exception` plus
            # logger.exception() here.
            logger.error('Failed VM Bootstrap')
            sys.exit(1)
else:
    # Non-bootstrap invocation: just wait for the cluster deployment to finish.
    try:
        current_admin_password = admin_password
        poll_until_cluster_deployed(cluster_id, check_timeout_seconds)
    except TimeoutException as e:
        logger.info(e)
        if report_timeout_fail:
            logger.error('Failed cluster deployment')
            sys.exit(1)
        else:
            logger.info('Cluster deployment has not completed')
            sys.exit(0)
    except:
        # NOTE(review): bare except — same caveat as above.
        logger.error('Failed cluster deployment')
        sys.exit(1)
logger.info('Finished VM Bootstrap successfully')
sys.exit(0)
| mit |
jymannob/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/mechanize/_urllib2.py | 134 | 1299 | # urllib2 work-alike interface
# ...from urllib2...
from urllib2 import \
URLError, \
HTTPError
# ...and from mechanize
from _auth import \
HTTPProxyPasswordMgr, \
HTTPSClientCertMgr
from _debug import \
HTTPResponseDebugProcessor, \
HTTPRedirectDebugProcessor
# crap ATM
## from _gzip import \
## HTTPGzipProcessor
from _urllib2_fork import \
AbstractBasicAuthHandler, \
AbstractDigestAuthHandler, \
BaseHandler, \
CacheFTPHandler, \
FileHandler, \
FTPHandler, \
HTTPBasicAuthHandler, \
HTTPCookieProcessor, \
HTTPDefaultErrorHandler, \
HTTPDigestAuthHandler, \
HTTPErrorProcessor, \
HTTPHandler, \
HTTPPasswordMgr, \
HTTPPasswordMgrWithDefaultRealm, \
HTTPRedirectHandler, \
ProxyBasicAuthHandler, \
ProxyDigestAuthHandler, \
ProxyHandler, \
UnknownHandler
from _http import \
HTTPEquivProcessor, \
HTTPRefererProcessor, \
HTTPRefreshProcessor, \
HTTPRobotRulesProcessor, \
RobotExclusionError
import httplib
if hasattr(httplib, 'HTTPS'):
from _urllib2_fork import HTTPSHandler
del httplib
from _opener import OpenerDirector, \
SeekableResponseOpener, \
build_opener, install_opener, urlopen
from _request import \
Request
| gpl-3.0 |
akpersad/GitGutter | git_gutter.py | 4 | 5006 | import os
import sublime
import sublime_plugin
# view_collection sits next to this file: the relative import works when the
# plugin is loaded as a package (ST3), the plain one when it is loaded as a
# bare module (ST2).
try:
    from .view_collection import ViewCollection
except (ImportError, ValueError):
    from view_collection import ViewCollection

# True when running under Sublime Text 3 (build 3000 and later).
ST3 = int(sublime.version()) >= 3000


def plugin_loaded():
    """ST3 plugin entry point: load the settings once the API is ready."""
    global settings
    settings = sublime.load_settings('GitGutter.sublime-settings')
class GitGutterCommand(sublime_plugin.WindowCommand):
    """Redraw GitGutter's diff markers and status-bar fields for the active
    view of the window this command runs in."""

    # One gutter region key ('git_gutter_<name>') is kept per marker kind.
    region_names = ['deleted_top', 'deleted_bottom',
                    'deleted_dual', 'inserted', 'changed',
                    'untracked', 'ignored']

    def run(self, force_refresh=False):
        """Recompute the diff for the active view and rebind all markers.

        force_refresh clears the cached git timestamp so the diff is
        recomputed even if the cache looks fresh.
        """
        self.view = self.window.active_view()
        if not self.view:
            # View is not ready yet, try again later.
            sublime.set_timeout(self.run, 1)
            return

        self.clear_all()
        show_untracked = settings.get('show_markers_on_untracked_file', False)
        # Untracked / ignored files get whole-file markers (if enabled);
        # tracked files get a real diff against the git baseline.
        if ViewCollection.untracked(self.view):
            if show_untracked:
                self.bind_files('untracked')
        elif ViewCollection.ignored(self.view):
            if show_untracked:
                self.bind_files('ignored')
        else:
            # If the file is untracked there is no need to execute the diff
            # update
            if force_refresh:
                ViewCollection.clear_git_time(self.view)
            inserted, modified, deleted = ViewCollection.diff(self.view)
            self.lines_removed(deleted)
            self.bind_icons('inserted', inserted)
            self.bind_icons('changed', modified)

            if(ViewCollection.show_status(self.view) != "none"):
                if(ViewCollection.show_status(self.view) == 'all'):
                    # Branch name arrives as bytes from the git subprocess.
                    branch = ViewCollection.current_branch(
                        self.view).decode("utf-8").strip()
                else:
                    branch = ""
                self.update_status(len(inserted),
                                   len(modified),
                                   len(deleted),
                                   ViewCollection.get_compare(self.view), branch)
            else:
                self.update_status(0, 0, 0, "", "")

    def update_status(self, inserted, modified, deleted, compare, branch):
        """Publish the per-view status-bar fields; a zero/empty value clears
        the corresponding field (set_status with "" removes it visually)."""
        def set_status_if(test, key, message):
            if test:
                self.view.set_status("git_gutter_status_" + key, message)
            else:
                self.view.set_status("git_gutter_status_" + key, "")

        set_status_if(inserted > 0, "inserted", "Inserted : %d" % inserted)
        set_status_if(modified > 0, "modified", "Modified : %d" % modified)
        set_status_if(deleted > 0, "deleted", "Deleted : %d regions" % deleted)
        set_status_if(compare, "comparison", "Comparing against : %s" % compare)
        set_status_if(branch, "branch", "On branch : %s" % branch)

    def clear_all(self):
        """Erase every region set this command may have previously added."""
        for region_name in self.region_names:
            self.view.erase_regions('git_gutter_%s' % region_name)

    def lines_to_regions(self, lines):
        """Map 1-based line numbers to zero-width regions at line starts."""
        regions = []
        for line in lines:
            position = self.view.text_point(line - 1, 0)
            region = sublime.Region(position, position)
            regions.append(region)
        return regions

    def lines_removed(self, lines):
        """Split deletion markers into top/bottom/dual variants.

        A deletion is drawn at the top of the following line and at the
        bottom of the preceding one; a line that needs both gets the 'dual'
        icon.  NOTE(review): `top_lines = lines` aliases the caller's list,
        so the remove() calls below mutate the `deleted` list passed in from
        run() — appears harmless here, but confirm before reusing.
        """
        top_lines = lines
        bottom_lines = [line - 1 for line in lines if line > 1]
        dual_lines = []
        for line in top_lines:
            if line in bottom_lines:
                dual_lines.append(line)
        for line in dual_lines:
            bottom_lines.remove(line)
            top_lines.remove(line)

        self.bind_icons('deleted_top', top_lines)
        self.bind_icons('deleted_bottom', bottom_lines)
        self.bind_icons('deleted_dual', dual_lines)

    def plugin_dir(self):
        """Return this plugin's directory name (without any extension)."""
        path = os.path.realpath(__file__)
        root = os.path.split(os.path.dirname(path))[1]
        return os.path.splitext(root)[0]

    def icon_path(self, icon_name):
        """Resolve the gutter icon resource path for a marker kind.

        Tall line heights get the arrow variants of the deletion icons;
        builds before 3014 use a relative path without extension.
        """
        if icon_name in ['deleted_top','deleted_bottom','deleted_dual']:
            if self.view.line_height() > 15:
                icon_name = icon_name + "_arrow"
        if int(sublime.version()) < 3014:
            path = '../GitGutter'
            extn = ''
        else:
            path = 'Packages/' + self.plugin_dir()
            extn = '.png'
        return "/".join([path, 'icons', icon_name + extn])

    def bind_icons(self, event, lines):
        """Add gutter regions for `lines` under the scope/icon for `event`."""
        regions = self.lines_to_regions(lines)
        event_scope = event
        if event.startswith('deleted'):
            event_scope = 'deleted'
        scope = 'markup.%s.git_gutter' % event_scope
        icon = self.icon_path(event)
        self.view.add_regions('git_gutter_%s' % event, regions, scope, icon)

    def bind_files(self, event):
        """Mark every line of the file with the given event (used for
        untracked/ignored files, which have no per-line diff)."""
        lines = []
        lineCount = ViewCollection.total_lines(self.view)
        i = 0
        while i < lineCount:
            lines += [i + 1]
            i = i + 1
        self.bind_icons(event, lines)
# ST2 has no plugin_loaded() hook, so initialise settings immediately on import.
if not ST3:
    plugin_loaded()
| mit |
sym3tri/mitro | browser-ext/login/test/chrome/__init__.py | 80 | 1053 |
# *****************************************************************************
# Copyright (c) 2012, 2013, 2014 Lectorius, Inc.
# Authors:
# Vijay Pandurangan (vijayp@mitro.co)
# Evan Jones (ej@mitro.co)
# Adam Hilss (ahilss@mitro.co)
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact the authors at inbound@mitro.co.
# *****************************************************************************
| gpl-3.0 |
0x27/clusterd | src/platform/tomcat/authenticate.py | 8 | 2720 | from src.platform.tomcat.interfaces import TINTERFACES
from requests.auth import HTTPBasicAuth
from requests.utils import dict_from_cookiejar
from sys import stdout
from log import LOG
import state
import utility
# Factory-default Tomcat manager credentials tried before any user-supplied
# auth or wordlist brute force; successful brute-forced pairs are prepended
# at runtime (see checkAuth).
default_credentials = [("tomcat", "tomcat"),
                       ("role1", "role1"),
                       ("admin", "admin"),
                       ("both", "tomcat"),
                       ("admin", "changethis")]
def _auth(usr, pswd, url):
    """Attempt HTTP Basic authentication against `url`.

    Returns a (cookie dict, HTTPBasicAuth) tuple on success, or None when
    the server does not answer 200.
    """
    res = utility.requests_get(url, auth=HTTPBasicAuth(usr, pswd))

    # Was `res.status_code is 200`: identity comparison against an int
    # literal only works by accident of CPython's small-int caching and is
    # not a correctness guarantee; use equality.
    if res.status_code == 200:
        utility.Msg("Successfully authenticated with %s:%s" % (usr, pswd), LOG.DEBUG)
        return (dict_from_cookiejar(res.cookies), HTTPBasicAuth(usr, pswd))
    return None
def checkAuth(ip, port, title, version):
    """Try to authenticate against a Tomcat Manager interface.

    Attempt order: explicit user-supplied credentials (state.usr_auth),
    then the default_credentials list, then — at most once per run
    (state.hasbf) — a brute force of state.bf_user with state.bf_wordlist.
    Returns _auth's (cookies, HTTPBasicAuth) tuple on success, otherwise
    falls through and returns None.  Only the Manager interface
    (TINTERFACES.MAN) is handled.
    """
    if title == TINTERFACES.MAN:
        url = "http://{0}:{1}/manager/html".format(ip, port)

        # check with given auth
        if state.usr_auth:
            (usr, pswd) = state.usr_auth.split(":")
            return _auth(usr, pswd, url)

        # else try default credentials
        for (usr, pswd) in default_credentials:
            cook = _auth(usr, pswd, url)
            if cook:
                return cook

        # if we're still here, check if they supplied a wordlist
        if state.bf_wordlist and not state.hasbf:
            # hasbf guards against brute forcing the same run twice
            state.hasbf = True

            wordlist = []
            # wordlist entries are decoded best-effort; undecodable bytes
            # are dropped rather than aborting the whole run
            with open(state.bf_wordlist, "r") as f:
                wordlist = [x.decode("ascii", "ignore").rstrip() for x in f.readlines()]

            utility.Msg("Brute forcing %s account with %d passwords..." %
                        (state.bf_user, len(wordlist)), LOG.DEBUG)

            try:
                for (idx, word) in enumerate(wordlist):
                    # in-place progress line on stdout (ANSI green, \r rewind)
                    stdout.flush()
                    stdout.write("\r\033[32m [%s] Brute forcing password for %s [%d/%d]\033[0m"
                                 % (utility.timestamp(), state.bf_user, idx+1, len(wordlist)))

                    cook = _auth(state.bf_user, word, url)
                    if cook:
                        print ''

                        # lets insert these credentials to the default list so we
                        # don't need to bruteforce it each time
                        if not (state.bf_user, word) in default_credentials:
                            default_credentials.insert(0, (state.bf_user, word))

                        utility.Msg("Successful login %s:%s" % (state.bf_user, word),
                                    LOG.SUCCESS)
                        return cook

                print ''
            except KeyboardInterrupt:
                # let the operator abort a long brute force without killing
                # the whole tool
                pass
| mit |
ossdemura/django-miniblog | src/Lib/site-packages/pip/_vendor/distlib/scripts.py | 333 | 15224 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
# otherwise whole
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
class ScriptMaker(object):
    """
    A class to copy or create scripts from source scripts or callable
    specifications.
    """
    # Template used for generated entry-point scripts; module-level
    # SCRIPT_TEMPLATE can be overridden per instance/subclass via this attr.
    script_template = SCRIPT_TEMPLATE

    executable = None  # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
             dry_run=False, fileop=None):
    """
    :param source_dir: Directory scripts are copied from.
    :param target_dir: Directory scripts are written to.
    :param add_launchers: Whether to wrap scripts in .exe launchers
                          (only has an effect on Windows).
    :param dry_run: Passed to the FileOperator created when `fileop`
                    is not supplied.
    :param fileop: Optional FileOperator-like object performing the
                   actual file writes (injectable for testing/dry runs).
    """
    self.source_dir = source_dir
    self.target_dir = target_dir
    self.add_launchers = add_launchers
    self.force = False
    self.clobber = False
    # It only makes sense to set mode bits on POSIX.
    self.set_mode = (os.name == 'posix') or (os.name == 'java' and
                                             os._name == 'posix')
    # '' -> plain script name; 'X.Y' -> Python-version-suffixed variant.
    self.variants = set(('', 'X.Y'))
    self._fileop = fileop or FileOperator(dry_run)
    # Jython reports os.name == 'java' but exposes the platform in os._name.
    self._is_nt = os.name == 'nt' or (
        os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
    """On Windows, swap a GUI script's interpreter for its pythonw
    counterpart; otherwise return `executable` unchanged."""
    if not (options.get('gui', False) and self._is_nt):
        return executable
    # pragma: no cover
    directory, basename = os.path.split(executable)
    return os.path.join(directory, basename.replace('python', 'pythonw'))
# Jython-only helpers: on that platform the "interpreter" may itself be a
# shell script, which cannot be referenced directly from a shebang line.
if sys.platform.startswith('java'):  # pragma: no cover
    def _is_shell(self, executable):
        """
        Determine if the specified executable is a script
        (contains a #! line)
        """
        try:
            with open(executable) as fp:
                return fp.read(2) == '#!'
        except (OSError, IOError):
            logger.warning('Failed to open %s', executable)
            return False

    def _fix_jython_executable(self, executable):
        # Route shell-script launchers (and non-Linux platforms) through
        # /usr/bin/env so the shebang remains executable.
        if self._is_shell(executable):
            # Workaround for Jython is not needed on Linux systems.
            import java

            if java.lang.System.getProperty('os.name') == 'Linux':
                return executable
        elif executable.lower().endswith('jython.exe'):
            # Use wrapper exe for Jython on Windows
            return executable

        return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
    """Build the b'#!...' shebang line (including trailing newline) for a
    generated script.

    The interpreter is self.executable when set, otherwise derived from
    the running Python (with special cases for in-tree builds, venvs,
    Windows GUI scripts, Jython and IronPython).  Raises ValueError when
    the resulting shebang cannot be decoded as UTF-8 or as the script's
    own `encoding`.
    """
    enquote = True
    if self.executable:
        executable = self.executable
        enquote = False  # assume this will be taken care of
    elif not sysconfig.is_python_build():
        executable = get_executable()
    elif in_venv():  # pragma: no cover
        executable = os.path.join(sysconfig.get_path('scripts'),
                                  'python%s' % sysconfig.get_config_var('EXE'))
    else:  # pragma: no cover
        executable = os.path.join(
            sysconfig.get_config_var('BINDIR'),
            'python%s%s' % (sysconfig.get_config_var('VERSION'),
                            sysconfig.get_config_var('EXE')))
    if options:
        executable = self._get_alternate_executable(executable, options)

    if sys.platform.startswith('java'):  # pragma: no cover
        executable = self._fix_jython_executable(executable)
    # Normalise case for Windows
    executable = os.path.normcase(executable)
    # If the user didn't specify an executable, it may be necessary to
    # cater for executable paths with spaces (not uncommon on Windows)
    if enquote:
        executable = _enquote_executable(executable)
    # Issue #51: don't use fsencode, since we later try to
    # check that the shebang is decodable using utf-8.
    executable = executable.encode('utf-8')
    # in case of IronPython, play safe and enable frames support
    if (sys.platform == 'cli' and '-X:Frames' not in post_interp
        and '-X:FullFrames' not in post_interp):  # pragma: no cover
        post_interp += b' -X:Frames'
    shebang = b'#!' + executable + post_interp + b'\n'
    # Python parser starts to read a script using UTF-8 until
    # it gets a #coding:xxx cookie. The shebang has to be the
    # first line of a file, the #coding:xxx cookie cannot be
    # written before. So the shebang has to be decodable from
    # UTF-8.
    try:
        shebang.decode('utf-8')
    except UnicodeDecodeError:  # pragma: no cover
        raise ValueError(
            'The shebang (%r) is not decodable from utf-8' % shebang)
    # If the script is encoded to a custom encoding (use a
    # #coding:xxx cookie), the shebang has to be decodable from
    # the script encoding too.
    if encoding != 'utf-8':
        try:
            shebang.decode(encoding)
        except UnicodeDecodeError:  # pragma: no cover
            raise ValueError(
                'The shebang (%r) is not decodable '
                'from the script encoding (%r)' % (shebang, encoding))
    return shebang
def _get_script_text(self, entry):
    """Render the entry-point script source for `entry` (an export entry
    with .prefix = module path and .suffix = dotted callable name)."""
    return self.script_template % dict(module=entry.prefix,
                                       func=entry.suffix)

# Windows side-by-side manifest attached to generated launchers;
# overridable per subclass/instance.
manifest = _DEFAULT_MANIFEST

def get_manifest(self, exename):
    """Return the manifest XML with the executable's basename filled in."""
    base = os.path.basename(exename)
    return self.manifest % base
    def _write_script(self, names, shebang, script_bytes, filenames, ext):
        """Write *script_bytes* (prefixed with *shebang*, and on Windows
        optionally wrapped in a launcher executable) to each name in *names*
        under target_dir, appending the written paths to *filenames*."""
        use_launcher = self.add_launchers and self._is_nt
        linesep = os.linesep.encode('utf-8')
        if not use_launcher:
            script_bytes = shebang + linesep + script_bytes
        else: # pragma: no cover
            # Console ('t') vs GUI ('w') launcher stub, chosen by extension.
            if ext == 'py':
                launcher = self._get_launcher('t')
            else:
                launcher = self._get_launcher('w')
            # Launcher file layout: stub exe + shebang + zipped __main__.py.
            stream = BytesIO()
            with ZipFile(stream, 'w') as zf:
                zf.writestr('__main__.py', script_bytes)
            zip_data = stream.getvalue()
            script_bytes = launcher + shebang + linesep + zip_data
        for name in names:
            outname = os.path.join(self.target_dir, name)
            if use_launcher: # pragma: no cover
                # Launchers are always written as <name>.exe.
                n, e = os.path.splitext(outname)
                if e.startswith('.py'):
                    outname = n
                outname = '%s.exe' % outname
                try:
                    self._fileop.write_binary_file(outname, script_bytes)
                except Exception:
                    # Failed writing an executable - it might be in use.
                    logger.warning('Failed to write executable - trying to '
                                   'use .deleteme logic')
                    dfname = '%s.deleteme' % outname
                    if os.path.exists(dfname):
                        os.remove(dfname) # Not allowed to fail here
                    os.rename(outname, dfname) # nor here
                    self._fileop.write_binary_file(outname, script_bytes)
                    logger.debug('Able to replace executable using '
                                 '.deleteme logic')
                    try:
                        os.remove(dfname)
                    except Exception:
                        pass # still in use - ignore error
            else:
                if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
                    outname = '%s.%s' % (outname, ext)
                if os.path.exists(outname) and not self.clobber:
                    logger.warning('Skipping existing file %s', outname)
                    continue
                self._fileop.write_binary_file(outname, script_bytes)
                if self.set_mode:
                    self._fileop.set_executable_mode([outname])
            filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
    def _copy_script(self, script, filenames):
        """Copy an existing *script* file to target_dir, rewriting its
        shebang line if the first line looks like one; paths written are
        appended to *filenames*."""
        adjust = False
        script = os.path.join(self.source_dir, convert_path(script))
        outname = os.path.join(self.target_dir, os.path.basename(script))
        if not self.force and not self._fileop.newer(script, outname):
            logger.debug('not copying %s (up-to-date)', script)
            return
        # Always open the file, but ignore failures in dry-run mode --
        # that way, we'll get accurate feedback if we can read the
        # script.
        try:
            f = open(script, 'rb')
        except IOError: # pragma: no cover
            if not self.dry_run:
                raise
            f = None
        else:
            first_line = f.readline()
            if not first_line: # pragma: no cover
                logger.warning('%s: %s is an empty file (skipping)',
                               self.get_command_name(), script)
                return
            # Normalise CRLF before matching so DOS files are handled too.
            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
            if match:
                adjust = True
                post_interp = match.group(1) or b''
        if not adjust:
            # No shebang to rewrite: straight copy.
            if f:
                f.close()
            self._fileop.copy_file(script, outname)
            if self.set_mode:
                self._fileop.set_executable_mode([outname])
            filenames.append(outname)
        else:
            logger.info('copying and adjusting %s -> %s', script,
                        self.target_dir)
            if not self._fileop.dry_run:
                encoding, lines = detect_encoding(f.readline)
                f.seek(0)
                shebang = self._get_shebang(encoding, post_interp)
                # A 'pythonw' shebang indicates a GUI script on Windows.
                if b'pythonw' in first_line: # pragma: no cover
                    ext = 'pyw'
                else:
                    ext = 'py'
                n = os.path.basename(outname)
                self._write_script([n], shebang, f.read(), filenames, ext)
            if f:
                f.close()
    @property
    def dry_run(self):
        # The dry-run flag is delegated to the underlying file operator.
        return self._fileop.dry_run
    @dry_run.setter
    def dry_run(self, value):
        self._fileop.dry_run = value
    if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
        # Executable launcher support.
        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
        def _get_launcher(self, kind):
            """Return launcher stub bytes for *kind* ('t' console, 'w' GUI)
            matching the interpreter's pointer width."""
            if struct.calcsize('P') == 8: # 64-bit
                bits = '64'
            else:
                bits = '32'
            name = '%s%s.exe' % (kind, bits)
            # Issue 31: don't hardcode an absolute package name, but
            # determine it relative to the current package
            distlib_package = __name__.rsplit('.', 1)[0]
            result = finder(distlib_package).find(name).bytes
            return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
Take a list of specifications and make scripts from them,
:param specifications: A list of specifications.
:return: A list of all absolute pathnames written to,
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| mit |
rcarrillocruz/ansible | lib/ansible/modules/storage/infinidat/infini_host.py | 69 | 4439 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_host
version_added: 2.3
short_description: Create, Delete and Modify Hosts on Infinibox
description:
- This module creates, deletes or modifies hosts on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Host Name
required: true
state:
description:
- Creates/Modifies Host when present or removes when absent
required: false
default: present
choices: [ "present", "absent" ]
wwns:
description:
- List of wwns of the host
required: false
volume:
description:
- Volume name to map to the host
required: false
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Create new new host
infini_host:
name: foo.example.com
user: admin
password: secret
system: ibox001
- name: Make sure host bar is available with wwn ports
infini_host:
name: bar.example.com
wwns:
- "00:00:00:00:00:00:00"
- "11:11:11:11:11:11:11"
system: ibox01
user: admin
password: secret
- name: Map host foo.example.com to volume bar
infini_host:
name: foo.example.com
volume: bar
system: ibox01
user: admin
password: secret
'''
RETURN = '''
'''
HAS_INFINISDK = True
try:
from infinisdk import InfiniBox, core
except ImportError:
HAS_INFINISDK = False
from ansible.module_utils.infinibox import *
from collections import Counter
@api_wrapper
def get_host(module, system):
    """Return the Infinibox host whose name matches module.params['name'],
    or None if no such host exists."""
    wanted = module.params['name']
    for candidate in system.hosts.to_list():
        if candidate.get_name() == wanted:
            return candidate
    return None
@api_wrapper
def create_host(module, system):
    """Create a new host, optionally registering WWN ports and mapping a
    volume, then exit reporting changed=True (no-op in check mode)."""
    if not module.check_mode:
        host = system.hosts.create(name=module.params['name'])
        wwns = module.params['wwns']
        if wwns:
            for wwn in wwns:
                host.add_fc_port(wwn)
        volume_name = module.params['volume']
        if volume_name:
            host.map_volume(system.volumes.get(name=volume_name))
    module.exit_json(changed=True)
@api_wrapper
def update_host(module, host):
    """Update an existing host.

    NOTE(review): no modification is currently performed - wwns/volume
    changes on an existing host are not applied here, and the function
    always reports changed=False. Looks intentional, but worth confirming.
    """
    changed = False
    name = module.params['name']  # read but currently unused
    module.exit_json(changed=changed)
@api_wrapper
def delete_host(module, host):
    """Delete *host* (skipped in check mode) and exit with changed=True."""
    if not module.check_mode:
        host.delete()
    module.exit_json(changed=True)
def main():
    """Module entry point: validate arguments, then dispatch to the
    create/update/delete helpers based on the desired state."""
    argument_spec = infinibox_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
            wwns=dict(type='list'),
            volume=dict()
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    if not HAS_INFINISDK:
        module.fail_json(msg='infinisdk is required for this module')

    state = module.params['state']
    system = get_system(module)
    host = get_host(module, system)

    # Verify the volume exists before attempting any host operation.
    if module.params['volume']:
        try:
            system.volumes.get(name=module.params['volume'])
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt still propagate.
            module.fail_json(msg='Volume {} not found'.format(module.params['volume']))

    if host and state == 'present':
        update_host(module, host)
    elif host and state == 'absent':
        delete_host(module, host)
    elif host is None and state == 'absent':
        module.exit_json(changed=False)
    else:
        create_host(module, system)
# Import Ansible Utilities
from ansible.module_utils.basic import AnsibleModule
# Ansible convention: utility imports at the bottom, main() invoked last.
if __name__ == '__main__':
    main()
| gpl-3.0 |
GuoshunWu/googlemock | test/gmock_test_utils.py | 769 | 3684 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Mocking Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
# Determines path to gtest_test_utils and imports it.
SCRIPT_DIR = os.path.dirname(__file__) or '.'
# isdir resolves symbolic links.
gtest_tests_util_dir = os.path.join(SCRIPT_DIR, '../gtest/test')
if os.path.isdir(gtest_tests_util_dir):
  GTEST_TESTS_UTIL_DIR = gtest_tests_util_dir
else:
  # Fall back to the layout where gmock lives one level deeper.
  GTEST_TESTS_UTIL_DIR = os.path.join(SCRIPT_DIR, '../../gtest/test')
sys.path.append(GTEST_TESTS_UTIL_DIR)
import gtest_test_utils # pylint: disable-msg=C6204
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  # Thin wrapper over the shared gtest helper.
  return gtest_test_utils.GetSourceDir()
def GetTestExecutablePath(executable_name):
  """Returns the absolute path of the test binary given its name.

  The function will print a message and abort the program if the resulting file
  doesn't exist.

  Args:
    executable_name: name of the test binary that the test script runs.

  Returns:
    The absolute path of the test binary.
  """
  # Delegates to the gtest helper, which aborts if the binary is missing.
  return gtest_test_utils.GetTestExecutablePath(executable_name)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.

  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() already returns the exit() argument directly
    # and os.WEXITSTATUS() doesn't work.
    return exit_code

  # On Unix, decode the wait status produced by os.system() with
  # os.WEXITSTATUS(); a signal-terminated process yields -1.
  if not os.WIFEXITED(exit_code):
    return -1
  return os.WEXITSTATUS(exit_code)
# Suppresses the "Invalid const name" lint complaint
# pylint: disable-msg=C6409
# Exposes utilities from gtest_test_utils.
# Re-exported so gmock test scripts only need to import this module.
Subprocess = gtest_test_utils.Subprocess
TestCase = gtest_test_utils.TestCase
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
PREMATURE_EXIT_FILE_ENV_VAR = gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR
# pylint: enable-msg=C6409
def Main():
  """Runs the unit test."""
  # Delegates to gtest's shared test runner.
  gtest_test_utils.Main()
| bsd-3-clause |
sphax3d/gedit-plugins | plugins/commander/modules/doc.py | 2 | 7056 | # -*- coding: utf-8 -*-
#
# doc.py - doc commander module
#
# Copyright (C) 2010 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import commander.commands as commands
import commander.commands.completion
import commander.commands.result
import commander.commands.exceptions
import re
__commander_module__ = True
class Argument:
	"""One parsed function argument: its full type expression, base type
	name and identifier, each stripped of surrounding whitespace."""

	def __init__(self, argtype, typename, name):
		for attr, value in (('type', argtype),
		                    ('type_name', typename),
		                    ('name', name)):
			setattr(self, attr, value.strip())
class Function:
	"""Parses a C/C++ function declaration out of a chunk of source text.

	After construction, `valid` indicates whether parsing succeeded; on
	success, `name`, `classname`, `constructor`, `destructor`, `const`,
	`return_type`, `return_type_name` and `args` (list of Argument) are set.
	"""
	def __init__(self, text):
		self._parse(text)

	def _parse(self, text):
		"""Populate the attributes from *text*; sets self.valid."""
		self.valid = False
		# Matches: [static|inline] [return-type] name(args) [const]
		parser = re.compile('^\\s*(?:(?:\\b(?:static|inline)\\b)\\s+)?(([a-z_:][a-z0-9_:<>]*)(?:\\s*(?:\\b(?:const)\\b)\\s*)?\\s*[*&]*\\s+)?([a-z_][a-z0-9_:~]*)\\s*\\(([^)]*)\\)(\\s*const)?', re.I)
		m = parser.match(text)

		if not m:
			return

		self.valid = True
		# Falsy when the declared return type is missing or 'void'.
		self.return_type = m.group(1) and m.group(1).strip() != 'void' and m.group(1).strip()
		self.return_type_name = self.return_type and m.group(2).strip()

		parts = m.group(3).split('::')
		self.name = parts[-1]

		if len(parts) > 1:
			self.classname = '::'.join(parts[0:-1])
		else:
			self.classname = None

		self.constructor = self.name == self.classname
		self.destructor = self.name == '~%s' % (self.classname,)
		# 'is not None' instead of '!= None' (PEP 8 identity comparison).
		self.const = m.group(5) is not None

		self.args = []
		argre = re.compile('(([a-z_:][a-z0-9_:<>]*)(?:\\s*(?:\\s*\\bconst\\b\\s*|[*&])\s*)*)\\s*([a-z_][a-z_0-9]*)$', re.I)

		for arg in m.group(4).split(','):
			arg = arg.strip()

			if arg == 'void' or arg == '':
				continue
			else:
				m2 = argre.match(arg.strip())

				if not m2:
					self.valid = False
					return

				arg = Argument(m2.group(1), m2.group(2), m2.group(3))
				self.args.append(arg)
class Documenter:
	"""Accumulates a documentation comment, optionally containing snippet
	placeholders, and inserts it into the view at a saved iterator."""

	def __init__(self, window, view, iter):
		self.window = window
		self.view = view
		self.iter = iter

		bus = self.window.get_message_bus()
		# Placeholders only work when the snippets plugin is available.
		self.canplaceholder = bus.is_registered('/plugins/snippets', 'parse-and-activate')
		self.placeholder = 1
		self.text = ''

	def append(self, *args):
		"""Append the string form of each argument; returns self for chaining."""
		self.text += ''.join(str(piece) for piece in args)
		return self

	def append_placeholder(self, *args):
		"""Append a numbered snippet placeholder (plain text when snippets
		are unavailable); returns self for chaining."""
		if not self.canplaceholder:
			return self.append(*args)

		body = " ".join(str(piece) for piece in args)
		self.text += "${%d:%s}" % (self.placeholder, body)
		self.placeholder += 1
		return self

	def insert(self):
		"""Hand the accumulated text to the snippets plugin for insertion."""
		if self.canplaceholder:
			bus = self.window.get_message_bus()
			bus.send('/plugins/snippets', 'parse-and-activate', snippet=self.text, iter=self.iter, view=self.view)
def _make_documenter(window, view):
	"""Parse the function declaration at the cursor and return a
	(Documenter, Function) pair for it.

	Raises commander.commands.exceptions.Execute when no valid function
	specification is found at the cursor line.
	"""
	buf = view.get_buffer()

	insert = buf.get_iter_at_mark(buf.get_insert())
	insert.set_line_offset(0)

	end = insert.copy()

	# This is just something random: scan a bounded window past the cursor.
	if not end.forward_chars(500):
		end = buf.get_end_iter()

	text = insert.get_text(end)
	func = Function(text)

	if not func.valid:
		raise commander.commands.exceptions.Execute('Could not find function specification')

	# Dead locals removed: the original also computed an unused message-bus
	# lookup ('canplaceholder') and the unused iterator offset.
	doc = Documenter(window, view, insert)
	return doc, func
def gtk(window, view):
	"""Generate gtk-doc documentation: doc.gtk

Generate a documentation template for the C or C++ function defined at the
cursor. The cursor needs to be on the first line of the function declaration
for it to work."""
	buf = view.get_buffer()
	lang = buf.get_language()

	if not lang or not lang.get_id() in ('c', 'chdr', 'cpp'):
		raise commander.commands.exceptions.Execute('Don\'t know about this language')

	doc, func = _make_documenter(window, view)

	# Generate docstring for this function
	doc.append("/**\n * ", func.name, ":\n")

	# Matches CamelCase type names (plain or 'struct _Name') for #Type refs.
	structp = re.compile('([A-Z]+[a-zA-Z]*)|struct\s+_([A-Z]+[a-zA-Z]*)')

	for arg in func.args:
		sm = structp.match(arg.type_name)

		doc.append(" * @", arg.name, ": ")

		if sm:
			doc.append_placeholder("a #%s" % (sm.group(1) or sm.group(2),))
		else:
			doc.append_placeholder("Description")

		doc.append(".\n")

	doc.append(" *\n * ").append_placeholder("Description").append(".\n")

	if func.return_type:
		sm = structp.match(func.return_type_name)

		doc.append(" *\n * Returns: ")

		if sm:
			doc.append_placeholder("a #%s" % (sm.group(1) or sm.group(2),))
		else:
			doc.append_placeholder("Description")
		doc.append(".\n")

	doc.append(" *\n **/\n")
	doc.insert()
def doxygen(window, view):
	"""Generate doxygen documentation: doc.doxygen

Generate a documentation template for the function defined at the
cursor. The cursor needs to be on the first line of the function declaration
for it to work."""
	buf = view.get_buffer()

	if not buf.get_language().get_id() in ('c', 'chdr', 'cpp'):
		raise commander.commands.exceptions.Execute('Don\'t know about this language')

	doc, func = _make_documenter(window, view)

	# Generate docstring for this function
	# (built incrementally; placeholders become snippet tab stops)
	doc.append("/** \\brief ").append_placeholder("Short description")

	if func.const:
		doc.append(" (const)")

	doc.append(".\n")

	for arg in func.args:
		doc.append(" * @param ", arg.name, " ").append_placeholder("Description").append("\n")

	doc.append(" *\n * ")

	if func.constructor:
		doc.append("Constructor.\n *\n * ")
	elif func.destructor:
		doc.append("Destructor.\n *\n * ")

	doc.append_placeholder("Detailed description").append(".\n")

	if func.return_type:
		doc.append(" *\n * @return: ")

		if func.return_type == 'bool':
			doc.append("true if ").append_placeholder("Description").append(", false otherwise")
		else:
			doc.append_placeholder("Description")

		doc.append("\n")

	doc.append(" *\n */\n")
	doc.insert()
# vi:ex:ts=4:et
| gpl-2.0 |
mancoast/CPythonPyc_test | cpython/266_test_multifile.py | 138 | 1702 | from test import test_support
mimetools = test_support.import_module('mimetools', deprecated=True)
multifile = test_support.import_module('multifile', deprecated=True)
import cStringIO
msg = """Mime-Version: 1.0
Content-Type: multipart/mixed;
boundary="=====================_590453667==_"
X-OriginalArrivalTime: 05 Feb 2002 03:43:23.0310 (UTC) FILETIME=[42D88CE0:01C1ADF7]
--=====================_590453667==_
Content-Type: multipart/alternative;
boundary="=====================_590453677==_.ALT"
--=====================_590453677==_.ALT
Content-Type: text/plain; charset="us-ascii"; format=flowed
test A
--=====================_590453677==_.ALT
Content-Type: text/html; charset="us-ascii"
<html>
<b>test B</font></b></html>
--=====================_590453677==_.ALT--
--=====================_590453667==_
Content-Type: text/plain; charset="us-ascii"
Content-Disposition: attachment; filename="att.txt"
Attached Content.
Attached Content.
Attached Content.
Attached Content.
--=====================_590453667==_--
"""
def getMIMEMsg(mf):
    """Recursively walk a MultiFile, counting multipart boundaries and
    the lines of every leaf part in the module globals."""
    global boundaries, linecount
    msg = mimetools.Message(mf)
    #print "TYPE: %s" % msg.gettype()
    if msg.getmaintype() == 'multipart':
        boundary = msg.getparam("boundary")
        boundaries += 1
        # Descend into each sub-part delimited by this boundary.
        mf.push(boundary)
        while mf.next():
            getMIMEMsg(mf)
        mf.pop()
    else:
        lines = mf.readlines()
        linecount += len(lines)
def test_main():
    """Parse the sample multipart message and check the aggregate counts."""
    global boundaries, linecount
    boundaries = 0
    linecount = 0
    f = cStringIO.StringIO(msg)
    getMIMEMsg(multifile.MultiFile(f))
    # One outer 'mixed' part plus one nested 'alternative' part.
    assert boundaries == 2
    assert linecount == 9
| gpl-3.0 |
pk-sam/crosswalk-test-suite | misc/webdriver-w3c-tests/base_test.py | 5 | 1745 |
import ConfigParser
import json
import os
import sys
import unittest
from webserver import Httpd
from network import get_lan_ip
class WebDriverBaseTest(unittest.TestCase):
    """Base class providing a shared WebDriver and a local HTTP server."""

    @classmethod
    def setUpClass(cls):
        cls.driver = create_driver()
        cls.webserver = Httpd(host=get_lan_ip())
        # Stash the browser kind and app id on the server instance so
        # served pages/tests can adapt to the target platform.
        cls.webserver.__dict__['mobile'] = os.environ.get("WD_BROWSER", 'firefox')
        cls.webserver.__dict__['appId'] = appId
        cls.webserver.start()

    @classmethod
    def tearDownClass(cls):
        cls.webserver.stop()
        if cls.driver:
            cls.driver.quit()
appId = None
def create_driver():
config = ConfigParser.ConfigParser()
config.read('webdriver.cfg')
section = os.environ.get("WD_BROWSER", 'firefox')
url = 'http://127.0.0.1:4444/wd/hub'
if config.has_option(section, 'url'):
url = config.get(section, "url")
capabilities = None
if config.has_option(section, 'capabilities'):
try:
capabilities = json.loads(config.get(section, "capabilities"))
except:
pass
mode = 'compatibility'
if config.has_option(section, 'mode'):
mode = config.get(section, 'mode')
if section == "android" or section == "tizen":
# import xwalk webdriver
exec "from selenium import webdriver"
# Save appId to build path of tizen app
if section == "tizen":
global appId
appId = capabilities["xwalkOptions"]["tizenAppId"]
return webdriver.Remote(url, capabilities)
else:
# import browser webdriver
exec "from webdriver.driver import WebDriver"
exec "from webdriver import exceptions, wait"
return WebDriver(url, {}, capabilities, mode)
| bsd-3-clause |
rohitwaghchaure/frappe-digitales | frappe/patches/v4_0/rename_sitemap_to_route.py | 26 | 1161 | import frappe
from frappe.model import rename_field
def execute():
	"""Migrate 'Website Sitemap*' doctypes and fields to 'Website Route*'."""
	tables = frappe.db.sql_list("show tables")
	# Drop the obsolete sitemap doctypes and their tables outright.
	for doctype in ("Website Sitemap", "Website Sitemap Config"):
		if "tab{}".format(doctype) in tables:
			frappe.delete_doc("DocType", doctype, force=1)
			frappe.db.sql("drop table `tab{}`".format(doctype))
	# Rename the permission doctype only if it was not migrated already.
	if "tabWebsite Route Permission" not in tables:
		frappe.rename_doc("DocType", "Website Sitemap Permission", "Website Route Permission", force=True)
	for d in ("Blog Category", "Blog Post", "Web Page", "Website Group"):
		frappe.reload_doc("website", "doctype", frappe.scrub(d))
		rename_field_if_exists(d, "parent_website_sitemap", "parent_website_route")
	frappe.reload_doc("website", "doctype", "website_route_permission")
	rename_field_if_exists("Website Route Permission", "website_sitemap", "website_route")
	for d in ("blog_category", "blog_post", "web_page", "website_group", "post", "user_vote"):
		frappe.reload_doc("website", "doctype", d)
def rename_field_if_exists(doctype, old_fieldname, new_fieldname):
	"""Rename *old_fieldname* to *new_fieldname* on *doctype*, silently
	ignoring MySQL error 1054 (unknown column) when the field is absent."""
	try:
		rename_field(doctype, old_fieldname, new_fieldname)
	except Exception as e:
		# 'except ... as' is valid on Python 2.6+ and required on Python 3;
		# the old 'except Exception, e:' form was Python-2-only.
		if e.args[0] != 1054:
			raise
| mit |
rbrito/pkg-youtube-dl | youtube_dl/extractor/uol.py | 10 | 5537 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
clean_html,
int_or_none,
parse_duration,
parse_iso8601,
qualities,
update_url_query,
)
class UOLIE(InfoExtractor):
    """Extracts videos from uol.com.br player/page URLs via the mais.uol API."""
    IE_NAME = 'uol.com.br'
    _VALID_URL = r'https?://(?:.+?\.)?uol\.com\.br/.*?(?:(?:mediaId|v)=|view/(?:[a-z0-9]+/)?|video(?:=|/(?:\d{4}/\d{2}/\d{2}/)?))(?P<id>\d+|[\w-]+-[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://player.mais.uol.com.br/player_video_v3.swf?mediaId=15951931',
        'md5': '4f1e26683979715ff64e4e29099cf020',
        'info_dict': {
            'id': '15951931',
            'ext': 'mp4',
            'title': 'Miss simpatia é encontrada morta',
            'description': 'md5:3f8c11a0c0556d66daf7e5b45ef823b2',
            'timestamp': 1470421860,
            'upload_date': '20160805',
        }
    }, {
        'url': 'http://tvuol.uol.com.br/video/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
        'md5': '2850a0e8dfa0a7307e04a96c5bdc5bc2',
        'info_dict': {
            'id': '15954259',
            'ext': 'mp4',
            'title': 'Incêndio destrói uma das maiores casas noturnas de Londres',
            'description': 'Em Londres, um incêndio destruiu uma das maiores boates da cidade. Não há informações sobre vítimas.',
            'timestamp': 1470674520,
            'upload_date': '20160808',
        }
    }, {
        'url': 'http://mais.uol.com.br/static/uolplayer/index.html?mediaId=15951931',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/15954259',
        'only_matching': True,
    }, {
        'url': 'http://noticias.band.uol.com.br/brasilurgente/video/2016/08/05/15951931/miss-simpatia-e-encontrada-morta.html',
        'only_matching': True,
    }, {
        'url': 'http://videos.band.uol.com.br/programa.asp?e=noticias&pr=brasil-urgente&v=15951931&t=Policia-desmonte-base-do-PCC-na-Cracolandia',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/cphaa0gl2x8r/incendio-destroi-uma-das-maiores-casas-noturnas-de-londres-04024E9A3268D4C95326',
        'only_matching': True,
    }, {
        'url': 'http://noticias.uol.com.br//videos/assistir.htm?video=rafaela-silva-inspira-criancas-no-judo-04024D983968D4C95326',
        'only_matching': True,
    }, {
        'url': 'http://mais.uol.com.br/view/e0qbgxid79uv/15275470',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Media metadata (title, revision, tags, thumbnails, ...).
        video_data = self._download_json(
            # https://api.mais.uol.com.br/apiuol/v4/player/data/[MEDIA_ID]
            'https://api.mais.uol.com.br/apiuol/v3/media/detail/' + video_id,
            video_id)['item']
        media_id = compat_str(video_data['mediaId'])
        title = video_data['title']
        ver = video_data.get('revision', 2)

        uol_formats = self._download_json(
            'https://croupier.mais.uol.com.br/v3/formats/%s/jsonp' % media_id,
            media_id)
        quality = qualities(['mobile', 'WEBM', '360p', '720p', '1080p'])
        formats = []
        for format_id, f in uol_formats.items():
            if not isinstance(f, dict):
                continue
            f_url = f.get('url') or f.get('secureUrl')
            if not f_url:
                continue
            # Versioning/auth parameters required by the CDN.
            query = {
                'ver': ver,
                'r': 'http://mais.uol.com.br',
            }
            for k in ('token', 'sign'):
                v = f.get(k)
                if v:
                    query[k] = v
            f_url = update_url_query(f_url, query)
            if format_id == 'HLS':
                m3u8_formats = self._extract_m3u8_formats(
                    f_url, media_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False)
                # The same query must also be appended to each segment URL.
                encoded_query = compat_urllib_parse_urlencode(query)
                for m3u8_f in m3u8_formats:
                    m3u8_f['extra_param_to_segment_url'] = encoded_query
                    m3u8_f['url'] = update_url_query(m3u8_f['url'], query)
                formats.extend(m3u8_formats)
                continue
            formats.append({
                'format_id': format_id,
                'url': f_url,
                'quality': quality(format_id),
                'preference': -1,
            })
        self._sort_formats(formats)

        tags = []
        for tag in video_data.get('tags', []):
            tag_description = tag.get('description')
            if not tag_description:
                continue
            tags.append(tag_description)

        thumbnails = []
        for q in ('Small', 'Medium', 'Wmedium', 'Large', 'Wlarge', 'Xlarge'):
            q_url = video_data.get('thumb' + q)
            if not q_url:
                continue
            thumbnails.append({
                'id': q,
                'url': q_url,
            })

        return {
            'id': media_id,
            'title': title,
            'description': clean_html(video_data.get('description')),
            'thumbnails': thumbnails,
            'duration': parse_duration(video_data.get('duration')),
            'tags': tags,
            'formats': formats,
            'timestamp': parse_iso8601(video_data.get('publishDate'), ' '),
            'view_count': int_or_none(video_data.get('viewsQtty')),
        }
| unlicense |
darkwing/kuma | vendor/packages/translate/convert/test_txt2po.py | 25 | 4944 | #!/usr/bin/env python
from translate.convert import test_convert, txt2po
from translate.misc import wStringIO
from translate.storage import txt
class TestTxt2PO:
    """Unit tests for plain-text to PO conversion."""

    def txt2po(self, txtsource, template=None):
        """helper that converts txt source to po source without requiring files"""
        inputfile = wStringIO.StringIO(txtsource)
        inputtxt = txt.TxtFile(inputfile)
        convertor = txt2po.txt2po()
        outputpo = convertor.convertstore(inputtxt)
        return outputpo

    def singleelement(self, storage):
        """checks that the pofile contains a single non-header element, and returns it"""
        print(str(storage))
        assert len(storage.units) == 1
        return storage.units[0]

    def test_simple(self):
        """test the most basic txt conversion"""
        txtsource = "A simple string"
        poexpected = '''#: :1
msgid "A simple string"
msgstr ""
'''
        poresult = self.txt2po(txtsource)
        # units[0] is the PO header; units[1] holds the converted string.
        assert str(poresult.units[1]) == poexpected

    def test_miltiple_units(self):
        """test that we can handle txt with multiple units"""
        txtsource = """First unit
Still part of first unit
Second unit is a heading
------------------------
Third unit with blank after but no more units.
"""
        poresult = self.txt2po(txtsource)
        assert poresult.units[0].isheader()
        assert len(poresult.units) == 4

    def test_carriage_return(self):
        """Remove carriage returns from files in dos format."""
        txtsource = '''The rapid expansion of telecommunications infrastructure in recent years has\r
helped to bridge the digital divide to a limited extent.\r
'''
        txtexpected = '''The rapid expansion of telecommunications infrastructure in recent years has
helped to bridge the digital divide to a limited extent.'''
        poresult = self.txt2po(txtsource)
        pounit = poresult.units[1]
        assert str(pounit.getsource()) == txtexpected
class TestDoku2po:
    """Unit tests for DokuWiki-flavoured text to PO conversion."""

    def doku2po(self, txtsource, template=None):
        """helper that converts dokuwiki source to po source without requiring files."""
        inputfile = wStringIO.StringIO(txtsource)
        inputtxt = txt.TxtFile(inputfile, flavour="dokuwiki")
        convertor = txt2po.txt2po()
        outputpo = convertor.convertstore(inputtxt)
        return outputpo

    def singleelement(self, storage):
        """checks that the pofile contains a single non-header element, and returns it"""
        print(str(storage))
        assert len(storage.units) == 1
        return storage.units[0]

    def test_basic(self):
        """Tests that we can convert some basic things."""
        dokusource = """=====Heading=====
This is a wiki page.
"""
        poresult = self.doku2po(dokusource)
        assert poresult.units[0].isheader()
        assert len(poresult.units) == 3
        # Heading markup is stripped; only the text is extracted.
        assert poresult.units[1].source == "Heading"
        assert poresult.units[2].source == "This is a wiki page."

    def test_bullets(self):
        """Tests that we can convert some basic things."""
        dokusource = """ * This is a fact.
 * This is a fact.
"""
        poresult = self.doku2po(dokusource)
        assert poresult.units[0].isheader()
        assert len(poresult.units) == 3
        assert poresult.units[1].source == "This is a fact."
        assert poresult.units[2].source == "This is a fact."

    def test_numbers(self):
        """Tests that we can convert some basic things."""
        dokusource = """ - This is an item.
 - This is an item.
"""
        poresult = self.doku2po(dokusource)
        assert poresult.units[0].isheader()
        assert len(poresult.units) == 3
        assert poresult.units[1].source == "This is an item."
        assert poresult.units[2].source == "This is an item."

    def test_spacing(self):
        """Tests that we can convert some basic things."""
        dokusource = """ ===== Heading =====
  * This is an item.
    * This is a subitem.
	* This is a tabbed item.
"""
        poresult = self.doku2po(dokusource)
        assert poresult.units[0].isheader()
        assert len(poresult.units) == 5
        assert poresult.units[1].source == "Heading"
        assert poresult.units[2].source == "This is an item."
        assert poresult.units[3].source == "This is a subitem."
        assert poresult.units[4].source == "This is a tabbed item."
class TestTxt2POCommand(test_convert.TestConvertCommand, TestTxt2PO):
    """Tests running actual txt2po commands on files"""
    convertmodule = txt2po
    defaultoptions = {"progress": "none"}

    def test_help(self):
        """tests getting help"""
        # Each help_check asserts the option appears in the --help output.
        options = test_convert.TestConvertCommand.test_help(self)
        options = self.help_check(options, "-P, --pot")
        options = self.help_check(options, "--duplicates")
        options = self.help_check(options, "--encoding")
        options = self.help_check(options, "--flavour", last=True)
| mpl-2.0 |
rimbalinux/LMD3 | django/db/models/aggregates.py | 12 | 2168 | """
Classes to represent the definitions of aggregate functions.
"""
class Aggregate(object):
    """
    Base definition of an aggregate function.

    Subclasses only need to provide the ``name`` class attribute (the
    identifier looked up on the backend's aggregates module).
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup 'price' with name 'Avg' -> 'price__avg'
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        Converts this generic definition into a backend-specific aggregate
        by instantiating the class of the same ``name`` from the query's
        aggregates module.

        * query is the backend-specific query instance.
        * col is a column reference (alias, or (table, column) tuple).
        * source is the underlying field or aggregate definition, used to
          determine the coerced output type where needed.
        * is_summary is True for a summary value rather than an annotation.
        """
        backend_class = getattr(query.aggregates_module, self.name)
        query.aggregates[alias] = backend_class(
            col, source=source, is_summary=is_summary, **self.extra)
# Concrete aggregates. Each differs only by ``name``, which
# Aggregate.add_to_query resolves on the backend's aggregates module.
class Avg(Aggregate):
    name = 'Avg'

class Count(Aggregate):
    name = 'Count'

class Max(Aggregate):
    name = 'Max'

class Min(Aggregate):
    name = 'Min'

class StdDev(Aggregate):
    name = 'StdDev'

class Sum(Aggregate):
    name = 'Sum'

class Variance(Aggregate):
    name = 'Variance'
| bsd-3-clause |
epam-mooc/edx-platform | cms/djangoapps/contentstore/tests/test_orphan.py | 32 | 4165 | """
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
class TestOrphan(CourseTestCase):
    """
    Test finding orphans via view and django config
    """
    def setUp(self):
        super(TestOrphan, self).setUp()
        runtime = self.course.runtime
        # Parented tree: Chapter1 -> Vert1 -> Html1, plus a sibling Chapter2.
        self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', self.course.location.name, runtime)
        self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', self.course.location.name, runtime)
        # Items created with parent None are the orphans under test.
        self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None, runtime)
        self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', runtime)
        self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None, runtime)
        self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', runtime)
        self._create_item('html', 'OrphanHtml', "<p>Hello</p>", {'display_name': 'Orphan html'}, None, None, runtime)
        # static_tab/about/course_info are parentless but are NOT counted as
        # orphans (see test_mongo_orphan, which expects exactly 3).
        self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, runtime)
        self._create_item('about', 'overview', "<p>overview</p>", {}, None, None, runtime)
        self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, runtime)
        self.orphan_url = reverse_course_url('orphan_handler', self.course.id)

    def _create_item(self, category, name, data, metadata, parent_category, parent_name, runtime):
        """Create one block in the course; attach it to a parent when
        parent_name is given, otherwise leave it parentless."""
        location = self.course.location.replace(category=category, name=name)
        store = modulestore()
        store.create_item(
            self.user.id,
            location.course_key,
            location.block_type,
            location.block_id,
            definition_data=data,
            metadata=metadata,
            runtime=runtime
        )
        if parent_name:
            # add child to parent in mongo
            parent_location = self.course.location.replace(category=parent_category, name=parent_name)
            parent = store.get_item(parent_location)
            parent.children.append(location)
            store.update_item(parent, self.user.id)

    def test_mongo_orphan(self):
        """
        Test that old mongo finds the orphans
        """
        orphans = json.loads(
            self.client.get(
                self.orphan_url,
                HTTP_ACCEPT='application/json'
            ).content
        )
        # Exactly the three deliberately-unparented chapter/vertical/html.
        self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
        location = self.course.location.replace(category='chapter', name='OrphanChapter')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='vertical', name='OrphanVert')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='html', name='OrphanHtml')
        self.assertIn(location.to_deprecated_string(), orphans)

    def test_mongo_orphan_delete(self):
        """
        Test that old mongo deletes the orphans
        """
        self.client.delete(self.orphan_url)
        orphans = json.loads(
            self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
        )
        self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))

    def test_not_permitted(self):
        """
        Test that auth restricts get and delete appropriately
        """
        # An enrolled-but-non-staff user must get 403 for both verbs.
        test_user_client, test_user = self.create_non_staff_authed_user_client()
        CourseEnrollment.enroll(test_user, self.course.id)
        response = test_user_client.get(self.orphan_url)
        self.assertEqual(response.status_code, 403)
        response = test_user_client.delete(self.orphan_url)
        self.assertEqual(response.status_code, 403)
| agpl-3.0 |
mgaitan/django-rest-framework | tests/test_reverse.py | 79 | 1546 | from __future__ import unicode_literals
from django.conf.urls import url
from django.core.urlresolvers import NoReverseMatch
from django.test import TestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIRequestFactory
# Shared request factory used by all tests in this module.
factory = APIRequestFactory()
def null_view(request):
    """Do-nothing view used only as a reverse() target."""
    return None
# Minimal URLconf for these tests: a single named route to reverse.
urlpatterns = [
    url(r'^view$', null_view, name='view'),
]
class MockVersioningScheme(object):
    """Stub versioning scheme whose reverse() either returns a fixed URL
    or raises NoReverseMatch, depending on ``raise_error``."""

    def __init__(self, raise_error=False):
        self.raise_error = raise_error

    def reverse(self, *args, **kwargs):
        if not self.raise_error:
            return 'http://scheme-reversed/view'
        raise NoReverseMatch()
class ReverseTests(TestCase):
    """
    Tests for fully qualified URLs when using `reverse`.
    """
    # Route requests against this module's urlpatterns.
    urls = 'tests.test_reverse'

    def test_reversed_urls_are_fully_qualified(self):
        # Passing the request makes reverse() build an absolute URL.
        request = factory.get('/view')
        url = reverse('view', request=request)
        self.assertEqual(url, 'http://testserver/view')

    def test_reverse_with_versioning_scheme(self):
        # A versioning scheme on the request takes over URL construction.
        request = factory.get('/view')
        request.versioning_scheme = MockVersioningScheme()
        url = reverse('view', request=request)
        self.assertEqual(url, 'http://scheme-reversed/view')

    def test_reverse_with_versioning_scheme_fallback_to_default_on_error(self):
        # If the scheme raises NoReverseMatch, reverse() falls back to the
        # default resolver.
        request = factory.get('/view')
        request.versioning_scheme = MockVersioningScheme(raise_error=True)
        url = reverse('view', request=request)
        self.assertEqual(url, 'http://testserver/view')
| bsd-2-clause |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/gdb/webkit.py | 115 | 11357 | # Copyright (C) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""GDB support for WebKit types.
Add this to your gdb by amending your ~/.gdbinit as follows:
python
import sys
sys.path.insert(0, "/path/to/tools/gdb/")
import webkit
"""
import gdb
import re
import struct
def guess_string_length(ptr):
    """Guess length of string pointed by ptr.

    Returns a tuple of (length, an error message).
    """
    # Scan for a NUL terminator, but give up after 2048 units so a bogus
    # pointer cannot make gdb walk memory indefinitely.
    for i in xrange(0, 2048):
        try:
            if int((ptr + i).dereference()) == 0:
                return i, ''
        except RuntimeError:
            # We indexed into inaccessible memory; give up.
            return i, ' (gdb hit inaccessible memory)'
    # NOTE(review): reports 256 even though 2048 units were scanned --
    # presumably to keep the printed string short; confirm intent.
    return 256, ' (gdb found no trailing NUL)'
def ustring_to_string(ptr, length=None):
    """Convert a pointer to UTF-16 data into a Python string encoded with utf-8.

    ptr and length are both gdb.Value objects.
    If length is unspecified, will guess at the length."""
    error_message = ''
    if length is None:
        length, error_message = guess_string_length(ptr)
    else:
        length = int(length)
    # Read each 16-bit unit, pack them back into raw bytes, then decode as
    # UTF-16 with undecodable sequences replaced rather than raising.
    char_vals = [int((ptr + i).dereference()) for i in xrange(length)]
    string = struct.pack('H' * length, *char_vals).decode('utf-16', 'replace').encode('utf-8')
    return string + error_message
def lstring_to_string(ptr, length=None):
    """Convert a pointer to LChar* data into a Python (non-Unicode) string.

    ptr and length are both gdb.Value objects.
    If length is unspecified, will guess at the length."""
    error_message = ''
    if length is None:
        length, error_message = guess_string_length(ptr)
    else:
        length = int(length)
    # LChar is an 8-bit character; build the string one byte at a time.
    string = ''.join([chr((ptr + i).dereference()) for i in xrange(length)])
    return string + error_message
class StringPrinter(object):
    """Base class shared by the string pretty-printers.

    Holds the gdb value being printed and advertises the 'string'
    display hint so gdb quotes the output.
    """

    def __init__(self, val):
        self.val = val

    def display_hint(self):
        return 'string'
class UCharStringPrinter(StringPrinter):
    "Print a UChar*; we must guess at the length"
    def to_string(self):
        # No length available for a raw pointer; ustring_to_string guesses.
        return ustring_to_string(self.val)


class LCharStringPrinter(StringPrinter):
    "Print a LChar*; we must guess at the length"
    def to_string(self):
        return lstring_to_string(self.val)


class WTFAtomicStringPrinter(StringPrinter):
    "Print a WTF::AtomicString"
    def to_string(self):
        # Delegate: returning the wrapped value lets gdb print the inner
        # WTF::String with its own registered printer.
        return self.val['m_string']


class WTFCStringPrinter(StringPrinter):
    "Print a WTF::CString"
    def to_string(self):
        # The CString holds a buffer, which is a refptr to a WTF::CStringBuffer.
        data = self.val['m_buffer']['m_ptr']['m_data'].cast(gdb.lookup_type('char').pointer())
        length = self.val['m_buffer']['m_ptr']['m_length']
        return ''.join([chr((data + i).dereference()) for i in xrange(length)])
class WTFStringImplPrinter(StringPrinter):
    "Print a WTF::StringImpl"
    def get_length(self):
        return self.val['m_length']

    def to_string(self):
        # StringImpl stores either 8-bit (m_data8) or 16-bit (m_data16)
        # characters; the flag bit decides which decoder applies.
        if self.is_8bit():
            return lstring_to_string(self.val['m_data8'], self.get_length())
        return ustring_to_string(self.val['m_data16'], self.get_length())

    def is_8bit(self):
        return self.val['m_hashAndFlags'] & self.val['s_hashFlag8BitBuffer']


class WTFStringPrinter(StringPrinter):
    "Print a WTF::String"
    def stringimpl_ptr(self):
        return self.val['m_impl']['m_ptr']

    def get_length(self):
        # A String with a null impl pointer is the null string.
        if not self.stringimpl_ptr():
            return 0
        return WTFStringImplPrinter(self.stringimpl_ptr().dereference()).get_length()

    def to_string(self):
        if not self.stringimpl_ptr():
            return '(null)'
        # Return the StringImpl value so its own printer formats it.
        return self.stringimpl_ptr().dereference()
class JSCIdentifierPrinter(StringPrinter):
    "Print a JSC::Identifier"
    def to_string(self):
        return WTFStringPrinter(self.val['m_string']).to_string()


class JSCJSStringPrinter(StringPrinter):
    "Print a JSC::JSString"
    def to_string(self):
        if self.val['m_length'] == 0:
            return ''
        return WTFStringImplPrinter(self.val['m_value']).to_string()


class WebCoreKURLGooglePrivatePrinter(StringPrinter):
    "Print a WebCore::KURLGooglePrivate"
    def to_string(self):
        # The URL is cached as UTF-8 in m_utf8 (a WTF::CString).
        return WTFCStringPrinter(self.val['m_utf8']).to_string()
class WebCoreQualifiedNamePrinter(StringPrinter):
    "Print a WebCore::QualifiedName"

    def __init__(self, val):
        super(WebCoreQualifiedNamePrinter, self).__init__(val)
        self.prefix_length = 0
        self.length = 0
        # A null m_impl leaves both lengths at 0 ("(null)" below).
        if self.val['m_impl']:
            self.prefix_printer = WTFStringPrinter(
                self.val['m_impl']['m_prefix']['m_string'])
            self.local_name_printer = WTFStringPrinter(
                self.val['m_impl']['m_localName']['m_string'])
            self.prefix_length = self.prefix_printer.get_length()
            if self.prefix_length > 0:
                # +1 accounts for the ':' separator between prefix and name.
                self.length = (self.prefix_length + 1 +
                               self.local_name_printer.get_length())
            else:
                self.length = self.local_name_printer.get_length()

    def get_length(self):
        return self.length

    def to_string(self):
        if self.get_length() == 0:
            return "(null)"
        else:
            # Render as "prefix:localName", or just the local name.
            if self.prefix_length > 0:
                return (self.prefix_printer.to_string() + ":" +
                        self.local_name_printer.to_string())
            else:
                return self.local_name_printer.to_string()
class WTFVectorPrinter:
    """Pretty Printer for a WTF::Vector.

    The output of this pretty printer is similar to the output of std::vector's
    pretty printer, which is bundled in gcc.

    Example gdb session should look like:
    (gdb) p v
    $3 = WTF::Vector of length 7, capacity 16 = {7, 17, 27, 37, 47, 57, 67}
    (gdb) set print elements 3
    (gdb) p v
    $6 = WTF::Vector of length 7, capacity 16 = {7, 17, 27...}
    (gdb) set print array
    (gdb) p v
    $7 = WTF::Vector of length 7, capacity 16 = {
      7,
      17,
      27
      ...
    }
    (gdb) set print elements 200
    (gdb) p v
    $8 = WTF::Vector of length 7, capacity 16 = {
      7,
      17,
      27,
      37,
      47,
      57,
      67
    }
    """

    class Iterator:
        # Iterates [start, finish) yielding ('[index]', element) pairs,
        # the shape gdb expects from children().
        def __init__(self, start, finish):
            self.item = start
            self.finish = finish
            self.count = 0

        def __iter__(self):
            return self

        def next(self):
            # Python 2 iterator protocol (gdb's embedded Python here).
            if self.item == self.finish:
                raise StopIteration
            count = self.count
            self.count += 1
            element = self.item.dereference()
            self.item += 1
            return ('[%d]' % count, element)

    def __init__(self, val):
        self.val = val

    def children(self):
        start = self.val['m_buffer']
        return self.Iterator(start, start + self.val['m_size'])

    def to_string(self):
        return ('%s of length %d, capacity %d'
                % ('WTF::Vector', self.val['m_size'], self.val['m_capacity']))

    def display_hint(self):
        return 'array'
def add_pretty_printers():
    """Register the WebKit pretty-printers with gdb."""
    # (type-tag regex, printer class) pairs tried in order.
    pretty_printers = (
        (re.compile("^WTF::Vector<.*>$"), WTFVectorPrinter),
        (re.compile("^WTF::AtomicString$"), WTFAtomicStringPrinter),
        (re.compile("^WTF::CString$"), WTFCStringPrinter),
        (re.compile("^WTF::String$"), WTFStringPrinter),
        (re.compile("^WTF::StringImpl$"), WTFStringImplPrinter),
        (re.compile("^WebCore::KURLGooglePrivate$"), WebCoreKURLGooglePrivatePrinter),
        (re.compile("^WebCore::QualifiedName$"), WebCoreQualifiedNamePrinter),
        (re.compile("^JSC::Identifier$"), JSCIdentifierPrinter),
        (re.compile("^JSC::JSString$"), JSCJSStringPrinter),
    )

    def lookup_function(val):
        """Function used to load pretty printers; will be passed to GDB."""
        # Peel references and typedefs to reach the underlying type tag.
        type = val.type
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target()
        type = type.unqualified().strip_typedefs()
        tag = type.tag
        if tag:
            for function, pretty_printer in pretty_printers:
                if function.search(tag):
                    return pretty_printer(val)
        # Bare UChar*/LChar* pointers get the guess-the-length printers.
        if type.code == gdb.TYPE_CODE_PTR:
            name = str(type.target().unqualified())
            if name == 'UChar':
                return UCharStringPrinter(val)
            if name == 'LChar':
                return LCharStringPrinter(val)
        # None tells gdb to fall back to its default formatting.
        return None

    gdb.pretty_printers.append(lookup_function)

# Register at import time (see module docstring for .gdbinit usage).
add_pretty_printers()
class PrintPathToRootCommand(gdb.Command):
    """Command for printing WebKit Node trees.

    Usage: printpathtoroot variable_name"""

    def __init__(self):
        super(PrintPathToRootCommand, self).__init__("printpathtoroot",
                                                     gdb.COMMAND_SUPPORT,
                                                     gdb.COMPLETE_NONE)

    def invoke(self, arg, from_tty):
        """Walk m_parent links from the named Node* variable, then print the
        chain root-first with increasing indentation."""
        element_type = gdb.lookup_type('WebCore::Element')
        node_type = gdb.lookup_type('WebCore::Node')
        frame = gdb.selected_frame()
        try:
            val = gdb.Frame.read_var(frame, arg)
        except:
            # NOTE(review): bare except also hides unrelated errors; kept as-is.
            print "No such variable, or invalid type"
            return

        target_type = str(val.type.target().strip_typedefs())
        if target_type == str(node_type):
            # Collect (node, tag-name) pairs up to the root; every node is
            # cast to Element to read m_tagName.
            stack = []
            while val:
                stack.append([val,
                              val.cast(element_type.pointer()).dereference()['m_tagName']])
                val = val.dereference()['m_parent']

            # Pop back down so the root prints first, children indented.
            padding = ''
            while len(stack) > 0:
                pair = stack.pop()
                print padding, pair[1], pair[0]
                padding = padding + '  '
        else:
            print 'Sorry: I don\'t know how to deal with %s yet.' % target_type

# Instantiating registers the command with gdb.
PrintPathToRootCommand()
| bsd-3-clause |
p0lyb1us/polybius.fyi | reveal.js/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 327 | 132617 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.generator.ninja as ninja_generator
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
from gyp.common import OrderedSet
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')


# Substitutions this generator makes for gyp's generic variables.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',
    'LIB_DIR': '$(OutDir)lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}


# The msvs specific sections that hold paths
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]


# msvs keys that apply to the whole target rather than per-configuration.
generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_large_pdb',
    'msvs_shard',
    'msvs_external_builder',
    'msvs_external_builder_out_dir',
    'msvs_external_builder_build_cmd',
    'msvs_external_builder_clean_cmd',
    'msvs_external_builder_clcompile_cmd',
    'msvs_enable_winrt',
    'msvs_requires_importlibrary',
    'msvs_enable_winphone',
    'msvs_application_type_revision',
    'msvs_target_platform_version',
    'msvs_target_platform_minversion',
]


# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]


# Cached results of _GetDomainAndUserName().
cached_username = None


cached_domain = None


# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
  """Return (domain, username) for the current user, caching the result.

  Falls back to parsing `net config Workstation` output when the
  USERDOMAIN/USERNAME environment variables are not set.
  """
  if sys.platform not in ('win32', 'cygwin'):
    # Not on Windows: return placeholders.
    return ('DOMAIN', 'USERNAME')
  global cached_username
  global cached_domain
  if not cached_domain or not cached_username:
    domain = os.environ.get('USERDOMAIN')
    username = os.environ.get('USERNAME')
    if not domain or not username:
      call = subprocess.Popen(['net', 'config', 'Workstation'],
                              stdout=subprocess.PIPE)
      config = call.communicate()[0]
      # NOTE(review): on Python 3 communicate() yields bytes, which these
      # str regexes would not match -- fine for the Python 2 gyp runtime.
      username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
      username_match = username_re.search(config)
      if username_match:
        username = username_match.group(1)
      domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
      domain_match = domain_re.search(config)
      if domain_match:
        domain = domain_match.group(1)
    cached_domain = domain
    cached_username = username
  return (cached_domain, cached_username)

# Prefix prepended to relative paths by _FixPath (module-level state).
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
  """Convert paths to a form that will make sense in a vcproj file.

  Arguments:
    path: The path to convert, may contain / etc.
  Returns:
    The path with all slashes made into backslashes.
  """
  # Relative, non-variable paths get the module-level prefix prepended.
  if fixpath_prefix and path and not os.path.isabs(path) and path[0] != '$':
    path = os.path.join(fixpath_prefix, path)
  path = _NormalizedSource(path.replace('/', '\\'))
  if path.endswith('\\'):
    path = path[:-1]
  return path
def _FixPaths(paths):
  """Fix each of the paths of the list."""
  return list(map(_FixPath, paths))
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
  """Converts a list split source file paths into a vcproj folder hierarchy.

  Arguments:
    sources: A list of source file paths split.
    prefix: A list of source file path layers meant to apply to each of sources.
    excluded: A set of excluded files.
    msvs_version: A MSVSVersion object.
  Returns:
    A hierarchy of filenames and MSVSProject.Filter objects that matches the
    layout of the source tree.
    For example:
    _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                     prefix=['joe'])
    -->
    [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
     MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
  """
  if not prefix: prefix = []
  result = []
  excluded_result = []
  folders = OrderedDict()
  # Gather files into the final result, excluded, or folders.
  for s in sources:
    if len(s) == 1:
      # A one-element split is a leaf file at this level.
      filename = _NormalizedSource('\\'.join(prefix + s))
      if filename in excluded:
        excluded_result.append(filename)
      else:
        result.append(filename)
    elif msvs_version and not msvs_version.UsesVcxproj():
      # For MSVS 2008 and earlier, we need to process all files before walking
      # the sub folders.
      if not folders.get(s[0]):
        folders[s[0]] = []
      folders[s[0]].append(s[1:])
    else:
      # MSVS 2010+: recurse into the sub folder immediately.
      contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                                  excluded=excluded,
                                                  list_excluded=list_excluded,
                                                  msvs_version=msvs_version)
      contents = MSVSProject.Filter(s[0], contents=contents)
      result.append(contents)
  # Add a folder for excluded files.
  if excluded_result and list_excluded:
    excluded_folder = MSVSProject.Filter('_excluded_files',
                                         contents=excluded_result)
    result.append(excluded_folder)

  if msvs_version and msvs_version.UsesVcxproj():
    return result

  # Populate all the folders (deferred pass for MSVS 2008 and earlier).
  for f in folders:
    contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                excluded=excluded,
                                                list_excluded=list_excluded,
                                                msvs_version=msvs_version)
    contents = MSVSProject.Filter(f, contents=contents)
    result.append(contents)
  return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
  """Return the 'Configuration|Platform' label MSVS uses for a config."""
  platform = _ConfigPlatform(config_data)
  base = _ConfigBaseName(config_name, platform)
  return '%s|%s' % (base, platform)
def _ConfigWindowsTargetPlatformVersion(config_data):
  """Resolve msvs_windows_target_platform_version to a concrete SDK version.

  A missing value or one already starting with a digit is returned as-is;
  otherwise the value is treated as a registry key name and looked up under
  the installed Windows SDKs (32- and 64-bit registry views).
  """
  ver = config_data.get('msvs_windows_target_platform_version')
  if not ver or re.match(r'^\d+', ver):
    return ver
  for key in [r'HKLM\Software\Microsoft\Microsoft SDKs\Windows\%s',
              r'HKLM\Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows\%s']:
    sdkdir = MSVSVersion._RegistryGetValue(key % ver, 'InstallationFolder')
    if not sdkdir:
      continue
    version = MSVSVersion._RegistryGetValue(key % ver, 'ProductVersion') or ''
    # find a matching entry in sdkdir\include
    names = sorted([x for x in os.listdir(r'%s\include' % sdkdir) \
                    if x.startswith(version)], reverse = True)
    # Highest matching include directory wins; returns on first SDK found.
    return names[0]
  # NOTE(review): implicitly returns None when no installed SDK matches.
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
  """Build a single command-line string for a rule/action command.

  Arguments:
    spec: the target dict (consulted for msvs_cygwin_dirs).
    cmd: the command as a list of arguments.
    cygwin_shell: truthy when the command must run under cygwin's bash.
    has_input_path: whether $(InputPath) may appear in the command.
    quote_cmd: whether to quote each argument (non-cygwin path only).
    do_setup_env: whether to call setup_env.bat before the command.
  Returns:
    A command string suitable for an MSVS custom build step.
  """
  # $(InputDir) ends with a trailing backslash in MSVS; the preamble strips
  # it into %INPUTDIR% so substitutions compose cleanly.
  if [x for x in cmd if '$(InputDir)' in x]:
    input_dir_preamble = (
      'set INPUTDIR=$(InputDir)\n'
      'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
      'set INPUTDIR=%INPUTDIR:~0,-1%\n'
      )
  else:
    input_dir_preamble = ''

  if cygwin_shell:
    # Find path to cygwin.
    cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
    # Prepare command: map MSVS macros to env vars converted via cygpath.
    direct_cmd = cmd
    direct_cmd = [i.replace('$(IntDir)',
                            '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(OutDir)',
                            '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
    direct_cmd = [i.replace('$(InputDir)',
                            '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
    if has_input_path:
      direct_cmd = [i.replace('$(InputPath)',
                              '`cygpath -m "${INPUTPATH}"`')
                    for i in direct_cmd]
    # Escape for the bash -c "..." wrapper below.
    direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
    # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
    direct_cmd = ' '.join(direct_cmd)
    # TODO(quote): regularize quoting path names throughout the module
    cmd = ''
    if do_setup_env:
      cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
    cmd += 'set CYGWIN=nontsec&& '
    # Only export env vars the command actually references.
    if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
      cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
    if direct_cmd.find('INTDIR') >= 0:
      cmd += 'set INTDIR=$(IntDir)&& '
    if direct_cmd.find('OUTDIR') >= 0:
      cmd += 'set OUTDIR=$(OutDir)&& '
    if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
      cmd += 'set INPUTPATH=$(InputPath) && '
    cmd += 'bash -c "%(cmd)s"'
    cmd = cmd % {'cygwin_dir': cygwin_dir,
                 'cmd': direct_cmd}
    return input_dir_preamble + cmd
  else:
    # Convert cat --> type to mimic unix.
    if cmd[0] == 'cat':
      command = ['type']
    else:
      command = [cmd[0].replace('/', '\\')]
    # Add call before command to ensure that commands can be tied together one
    # after the other without aborting in Incredibuild, since IB makes a bat
    # file out of the raw command string, and some commands (like python) are
    # actually batch files themselves.
    command.insert(0, 'call')
    # Fix the paths
    # TODO(quote): This is a really ugly heuristic, and will miss path fixing
    # for arguments like "--arg=path" or "/opt:path".
    # If the argument starts with a slash or dash, it's probably a command line
    # switch
    arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
    arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
    arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
    if quote_cmd:
      # Support a mode for using cmd directly.
      # Convert any paths to native form (first element is used directly).
      # TODO(quote): regularize quoting path names throughout the module
      arguments = ['"%s"' % i for i in arguments]
    # Collapse into a single command.
    return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
  """Build the command line for one rule, resolving the cygwin-shell and
  quoting options from the rule (falling back to the target spec)."""
  # Currently this weird argument munging is used to duplicate the way a
  # python script would need to be run as part of the chrome tree.
  # Eventually we should add some sort of rule_default option to set this
  # per project. For now the behavior chrome needs is the default.
  cygwin_shell = rule.get('msvs_cygwin_shell')
  if cygwin_shell is None:
    cygwin_shell = int(spec.get('msvs_cygwin_shell', 1))
  elif isinstance(cygwin_shell, str):
    cygwin_shell = int(cygwin_shell)
  quote_cmd = int(rule.get('msvs_quote_cmd', 1))
  return _BuildCommandLineForRuleRaw(spec, rule['action'], cygwin_shell,
                                     has_input_path, quote_cmd,
                                     do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
  """Add a custom build tool to execute something.

  Arguments:
    p: the target project
    spec: the target project dict
    primary_input: input file to attach the build tool to
    inputs: list of inputs
    outputs: list of outputs
    description: description of the action
    cmd: command line to execute
  """
  inputs = _FixPaths(inputs)
  outputs = _FixPaths(outputs)
  tool = MSVSProject.Tool(
      'VCCustomBuildTool',
      {'Description': description,
       'AdditionalDependencies': ';'.join(inputs),
       'Outputs': ';'.join(outputs),
       'CommandLine': cmd,
      })
  # Add to the properties of primary input for each config.
  for config_name, c_data in spec['configurations'].iteritems():
    p.AddFileConfig(_FixPath(primary_input),
                    _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
  """Add actions accumulated into an actions_dict, merging as needed.

  Arguments:
    p: the target project
    spec: the target project dict
    actions_dict: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.
  """
  for primary_input in actions_dict:
    # Merge every action attached to this input into one custom build step:
    # union of inputs/outputs, concatenated descriptions and commands.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions_dict[primary_input]:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      commands.append(action['command'])
    # Add the custom build step for one input file.
    description = ', and also '.join(descriptions)
    command = '\r\n'.join(commands)
    _AddCustomBuildToolForMSVS(p, spec,
                               primary_input=primary_input,
                               inputs=inputs,
                               outputs=outputs,
                               description=description,
                               cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
  """Find the inputs and outputs generated by a rule.

  Arguments:
    rule: the rule in question.
    trigger_file: the main trigger for this rule.
  Returns:
    The pair of (inputs, outputs) involved in this rule.
  """
  fixed_inputs = _FixPaths(rule.get('inputs', []))
  fixed_outputs = _FixPaths(rule.get('outputs', []))
  inputs = OrderedSet()
  outputs = OrderedSet()
  # The trigger file itself is always an input; every other path may contain
  # rule macros that must be expanded against the trigger file.
  inputs.add(trigger_file)
  for path in fixed_inputs:
    inputs.add(_RuleExpandPath(path, trigger_file))
  for path in fixed_outputs:
    outputs.add(_RuleExpandPath(path, trigger_file))
  return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
  """Generate a native rules file.

  Writes a .rules file next to the project and registers it as a tool file
  on the project.

  Arguments:
    p: the target project
    rules: the set of rules to include
    output_dir: the directory in which the project/gyp resides
    spec: the project dict
    options: global generator options
  """
  rules_filename = '%s%s.rules' % (spec['target_name'],
                                   options.suffix)
  rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                   spec['target_name'])
  # Add each rule.
  for r in rules:
    rule_name = r['rule_name']
    rule_ext = r['extension']
    inputs = _FixPaths(r.get('inputs', []))
    outputs = _FixPaths(r.get('outputs', []))
    # Skip a rule with no action and no inputs.
    if 'action' not in r and not r.get('rule_sources', []):
      continue
    cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
                                   do_setup_env=True)
    rules_file.AddCustomBuildRule(name=rule_name,
                                  description=r.get('message', rule_name),
                                  extensions=[rule_ext],
                                  additional_dependencies=inputs,
                                  outputs=outputs,
                                  cmd=cmd)
  # Write out rules file (only touches disk if the content changed).
  rules_file.WriteIfChanged()
  # Add rules file to project.
  p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
  """Generate an external makefile to do a set of rules.

  Emits a `<target>_rules<suffix>.mk` makefile (run under cygwin via make)
  that builds every rule output, adds the makefile to the project sources,
  and attaches a single build action that invokes it.

  Arguments:
    rules: the list of rules to include
    output_dir: path containing project and gyp files
    spec: project specification data
    sources: set of sources known
    options: global generator options
    actions_to_add: The list of actions we will add to.
  """
  filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
  mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
  # Find cygwin style versions of some paths.
  mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
  mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
  # Gather stuff needed to emit all: target.
  all_inputs = OrderedSet()
  all_outputs = OrderedSet()
  all_output_dirs = OrderedSet()
  first_outputs = []
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      all_inputs.update(OrderedSet(inputs))
      all_outputs.update(OrderedSet(outputs))
      # Only use one target from each rule as the dependency for
      # 'all' so we don't try to build each rule multiple times.
      first_outputs.append(list(outputs)[0])
      # Get the unique output directories for this rule.
      output_dirs = [os.path.split(i)[0] for i in outputs]
      for od in output_dirs:
        all_output_dirs.add(od)
  first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
  # Write out all: target, including mkdir for each output directory.
  mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
  for od in all_output_dirs:
    if od:
      mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
  mk_file.write('\n')
  # Define how each output is generated.
  for rule in rules:
    trigger_files = _FindRuleTriggerFiles(rule, sources)
    for tf in trigger_files:
      # Get all the inputs and outputs for this rule for this trigger file.
      inputs, outputs = _RuleInputsAndOutputs(rule, tf)
      inputs = [_Cygwinify(i) for i in inputs]
      outputs = [_Cygwinify(i) for i in outputs]
      # Prepare the command line for this rule.
      cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
      cmd = ['"%s"' % i for i in cmd]
      cmd = ' '.join(cmd)
      # Add it to the makefile.
      mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
      mk_file.write('\t%s\n\n' % cmd)
  # Close up the file.
  mk_file.close()
  # Add makefile to list of sources.
  sources.add(filename)
  # Add a build action to call makefile.
  cmd = ['make',
         'OutDir=$(OutDir)',
         'IntDir=$(IntDir)',
         '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
         '-f', filename]
  cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
  # Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand from in the IDE.
  all_inputs = list(all_inputs)
  all_inputs.insert(0, filename)
  _AddActionStep(actions_to_add,
                 inputs=_FixPaths(all_inputs),
                 outputs=_FixPaths(all_outputs),
                 description='Running external rules for %s' %
                     spec['target_name'],
                 command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does works, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Apply the three escaping layers in order: environment-variable escaping,
  # CommandLineToArgv escaping, then VCProj list-item escaping.
  escaped = _EscapeVCProjCommandLineArgListItem(
      _EscapeCommandLineArgumentForMSVS(
          _EscapeEnvironmentVariableExpansion(s)))
  # cl.exe replaces literal # characters with = in preprocesor definitions for
  # some reason. Octal-encode to work around that.
  return escaped.replace('#', '\\%03o' % ord('#'))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Apply the three escaping layers in order: environment-variable escaping,
  # MSBuild command-line escaping, then MSBuild special-character escaping.
  escaped = _EscapeMSBuildSpecialCharacters(
      _EscapeCommandLineArgumentForMSBuild(
          _EscapeEnvironmentVariableExpansion(s)))
  # cl.exe replaces literal # characters with = in preprocesor definitions for
  # some reason. Octal-encode to work around that.
  return escaped.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.

  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  rules = spec.get('rules', [])
  # Partition the rules by whether they run through a native .rules file or
  # through an external makefile.
  rules_native = []
  rules_external = []
  for rule in rules:
    if int(rule.get('msvs_external_rule', 0)):
      rules_external.append(rule)
    else:
      rules_native.append(rule)
  # Handle rules that use a native rules file.
  if rules_native:
    _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
  # Handle external rules (non-native rules).
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, False)
def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
  """Un-exclude rule trigger files and fold rule outputs into the sources."""
  # Add outputs generated by each rule (if applicable).
  for rule in rules:
    for trigger_file in _FindRuleTriggerFiles(rule, sources):
      # Remove trigger_file from excluded_sources to let the rule be triggered
      # (e.g. rule trigger ax_enums.idl is added to excluded_sources
      # because it's also in an action's inputs in the same project)
      excluded_sources.discard(_FixPath(trigger_file))
      # Nothing more to do unless the rule's outputs are processed as sources.
      if not int(rule.get('process_outputs_as_sources', False)):
        continue
      inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
      inputs = OrderedSet(_FixPaths(inputs))
      outputs = OrderedSet(_FixPaths(outputs))
      inputs.remove(_FixPath(trigger_file))
      sources.update(inputs)
      if not is_msbuild:
        excluded_sources.update(inputs)
      sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  # Files carrying a custom action must stay in the build or the action
  # would never run.
  must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
  filtered = []
  for source in excluded_sources:
    if source not in must_keep:
      filtered.append(source)
  return filtered
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # Pluck out the default configuration.
  default_config = _GetDefaultConfiguration(spec)
  # Decide the guid of the project.
  guid = default_config.get('msvs_guid')
  if guid:
    # An explicitly supplied guid must be well-formed (without braces).
    if VALID_MSVS_GUID_CHARS.match(guid) is None:
      raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                       (guid, VALID_MSVS_GUID_CHARS.pattern))
    guid = '{%s}' % guid
  # Otherwise derive a deterministic guid from the project path.
  guid = guid or MSVSNew.MakeGuid(proj_path)
  return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
  """Get the platform toolset for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
    version: The MSVSVersion object.
  Returns:
    the platform toolset string or None.
  """
  # An explicit msbuild_toolset on the default configuration wins; otherwise
  # fall back to the toolset shipped with this MSVS version (if any).
  toolset = _GetDefaultConfiguration(spec).get('msbuild_toolset')
  if not toolset:
    toolset = version.DefaultToolset() or toolset
  return toolset
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  default_config = _GetDefaultConfiguration(project.spec)

  # Skip emitting anything if told to with msvs_existing_vcproj option.
  if default_config.get('msvs_existing_vcproj'):
    return []

  # Dispatch on project format: .vcxproj (MSBuild) vs. legacy .vcproj.
  generate = (_GenerateMSBuildProject if version.UsesVcxproj()
              else _GenerateMSVSProject)
  return generate(project, options, version, generator_flags)
# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
def _ValidateSourcesForMSVSProject(spec, version):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
"""
# This validation should not be applied to MSVC2010 and later.
assert not version.UsesVcxproj()
# TODO: Check if MSVC allows this for loadable_module targets.
if spec.get('type', None) not in ('static_library', 'shared_library'):
return
sources = spec.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'MSVC08 cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file.  It may create .rules and .user files too.

  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  spec = project.spec
  gyp.common.EnsureDirExists(project.path)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)

  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  # items() (rather than the Python 2-only iteritems()) keeps this function
  # working under both Python 2 and 3.
  for config_name, config in spec['configurations'].items():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)

  # MSVC08 and prior version cannot handle duplicate basenames in the same
  # target.
  # TODO: Take excluded sources into consideration if possible.
  _ValidateSourcesForMSVSProject(spec, version)

  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)

  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                sources, excluded_sources,
                                                list_excluded, version))

  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)

  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)

  # NOTE: this stanza must appear after all actions have been decided.
  # Don't excluded sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)

  # Write it out.
  p.WriteIfChanged()

  return missing_sources
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform names, in first-seen order.
    (The original docstring incorrectly said this returns an MSVSUserFile
    object; it returns a list of platform strings.)
  """
  # Gather list of unique platforms.
  platforms = OrderedSet()
  for configuration in spec['configurations'].values():
    platforms.add(_ConfigPlatform(configuration))
  return list(platforms)
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created.  The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  (domain, username) = _GetDomainAndUserName()
  # Embed domain and user name in the filename so different users don't
  # clobber each other's per-user settings.
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  return MSVSUserFile.Writer(vcuser_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.

  Many settings in a vcproj file are specific to a configuration.  This
  function the main part of the vcproj file that's configuration specific.

  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
            for this configuration.
  """
  # Get the information for this configuration
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(config)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
  defines = _GetDefines(config)
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')

  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)

  # Prevent default library inheritance from the environment.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])

  # User msvs_settings are appended first so later appends merge into them.
  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCMIDLTool',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # only_if_unset: a user-provided OutputFile wins over the computed one.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)

  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of directory paths.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  system_dirs = config.get('msvs_system_include_dirs', [])
  include_dirs = config.get('include_dirs', []) + system_dirs
  midl_include_dirs = config.get('midl_include_dirs', []) + system_dirs
  # Resource dirs default to the regular include dirs when unspecified.
  resource_include_dirs = config.get('resource_include_dirs', include_dirs)
  return (_FixPaths(include_dirs),
          _FixPaths(midl_include_dirs),
          _FixPaths(resource_include_dirs))
def _GetLibraryDirs(config):
  """Returns the list of directories to be used for library search paths.

  Arguments:
    config: The dictionary that defines the special processing to be done
            for this configuration.
  Returns:
    The list of directory paths.
  """
  return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  # items() (rather than the Python 2-only iteritems()) keeps this function
  # working under both Python 2 and 3.
  for tool, settings in tools.items():
    # Collapse settings with lists.
    settings_fixed = {}
    for setting, value in settings.items():
      # isinstance also accepts list subclasses, unlike `type(value) == list`.
      if isinstance(value, list):
        # Linker AdditionalDependencies and any AdditionalOptions are
        # space-delimited; every other list setting is semicolon-delimited.
        if ((tool == 'VCLinkerTool' and
             setting == 'AdditionalDependencies') or
            setting == 'AdditionalOptions'):
          settings_fixed[setting] = ' '.join(value)
        else:
          settings_fixed[setting] = ';'.join(value)
      else:
        settings_fixed[setting] = value
    # Add in this tool.
    tool_list.append(MSVSProject.Tool(tool, settings_fixed))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
            for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  # Compute attributes and convert the tools dict to Tool objects, then
  # register the configuration on the project in one step.
  p.AddConfig(_ConfigFullName(config_name, config),
              attrs=_GetMSVSAttributes(spec, config, config_type),
              tools=_ConvertToolsToExpectedForm(tools))
def _GetMSVSAttributes(spec, config, config_type):
  """Build the configuration-attributes dict for a vcproj configuration.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    config: The dictionary that defines the special processing to be done
            for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
  Returns:
    A dict of vcproj configuration attributes.
  """
  # Prepare configuration attributes.
  prepared_attrs = {}
  source_attrs = config.get('msvs_configuration_attributes', {})
  for a in source_attrs:
    prepared_attrs[a] = source_attrs[a]
  # Add props files.
  vsprops_dirs = config.get('msvs_props', [])
  vsprops_dirs = _FixPaths(vsprops_dirs)
  if vsprops_dirs:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
  # Set configuration type.
  prepared_attrs['ConfigurationType'] = config_type
  output_dir = prepared_attrs.get('OutputDirectory',
                                  '$(SolutionDir)$(ConfigurationName)')
  prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' not in prepared_attrs:
    # Scope the intermediate directory by project name to prevent collisions
    # between targets sharing a configuration directory.
    intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
    prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
  else:
    intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
    intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
    prepared_attrs['IntermediateDirectory'] = intermediate
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  """Normalize each path in sources_array and add it to sources_set."""
  for source in sources_array:
    sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: dict of generator-specific flags.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = OrderedSet()
  excluded_sources = OrderedSet()
  _AddNormalizedSources(sources, spec.get('sources', []))
  # Add in the gyp file itself so that changes to it trigger a rebuild.
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)
  # Add in 'action' inputs and outputs.
  for a in spec.get('actions', []):
    action_inputs = OrderedSet(
        [_NormalizedSource(i) for i in a['inputs']])
    # Add all inputs to sources and excluded sources.
    sources.update(action_inputs)
    if not spec.get('msvs_external_builder'):
      excluded_sources.update(action_inputs)
    if int(a.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, a.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
  """Adjusts the list of sources and excluded sources.
  Also converts the sets to lists.
  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files are listed in the project.
    version: A MSVSVersion object.
  Returns:
    A trio of (list of sources, list of excluded sources,
    path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded,
                                             msvs_version=version)
  # Prune filters with a single child to flatten ugly directory structures
  # such as ../../src/modules/module1 etc.
  if version.UsesVcxproj():
    # MSBuild projects: repeatedly strip a shared single-name root filter.
    while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
        and len(set([s.name for s in sources])) == 1:
      assert all([len(s.contents) == 1 for s in sources])
      sources = [s.contents[0] for s in sources]
  else:
    # Old-style projects: unwrap a lone top-level filter.
    while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
      sources = sources[0].contents
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
  """Gather precompiled-header related sources across all configurations.

  Returns:
    List of _FixPath()-ed paths named by the precomp_keys settings
    (precompiled header/source) of every configuration.
  """
  precompiled_related = []
  # items() instead of Python-2-only iteritems(): identical behavior on
  # Python 2, and keeps the function portable to Python 3.
  for _, config in spec['configurations'].items():
    for k in precomp_keys:
      f = config.get(k)
      if f:
        precompiled_related.append(_FixPath(f))
  return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Mark excluded sources as ExcludedFromBuild in the relevant configs.

  Arguments:
    p: The MSVS project writer.
    spec: The target dictionary containing the properties of the target.
    excluded_sources: List of sources excluded from the build.
    excluded_idl: List of idl files handled by external rules.
    list_excluded: Whether excluded files still appear in the project.
  """
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  num_configs = len(spec['configurations'])
  # items() instead of Python-2-only iteritems(): identical behavior on
  # Python 2, and keeps the function portable to Python 3.
  for file_name, excluded_configs in exclusions.items():
    if not list_excluded and len(excluded_configs) == num_configs:
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      continue
    for config_name, config in excluded_configs:
      p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                      {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  """Map each excluded file to the (config_name, config) pairs it is
  excluded from.

  A file that is precompiled-header related in a configuration is NOT
  excluded from that configuration, since it must still be compiled there.
  Externally-handled idl files are excluded from every configuration.
  """
  exclusions = {}
  configurations = spec['configurations']
  # Exclude excluded sources from being built.
  for f in excluded_sources:
    excluded_configs = []
    # items() instead of Python-2-only iteritems(): identical behavior on
    # Python 2, and keeps the function portable to Python 3.
    for config_name, config in configurations.items():
      precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
      # Don't do this for ones that are precompiled header related.
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  # If any non-native rules use 'idl' as an extension exclude idl files.
  # Exclude them now.
  for f in excluded_idl:
    exclusions[f] = list(configurations.items())
  return exclusions
def _AddToolFilesToMSVS(p, spec):
  """Register each configuration's msvs_tool_files (rule files) with the
  project, de-duplicated across configurations."""
  tool_files = OrderedSet()
  # items() instead of Python-2-only iteritems(): identical behavior on
  # Python 2, and keeps the function portable to Python 3.
  for _, config in spec['configurations'].items():
    for f in config.get('msvs_tool_files', []):
      tool_files.add(f)
  for f in tool_files:
    p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configure precompiled-header creation and usage for the project.

  The file named by msvs_precompiled_source gets UsePrecompiledHeader=1
  (create the PCH); every source of the other language kind (C vs. C++)
  gets precompiled headers disabled.
  """
  # Pre-compiled header source stubs need a different compiler flag
  # (generate precompiled header) and any source file not of the same
  # kind (i.e. C vs. C++) as the precompiled header source stub needs
  # to have use of precompiled headers disabled.
  extensions_excluded_from_precompile = []
  # NOTE(review): if configurations disagree on the precompiled source's
  # language, the last configuration iterated wins for the exclusion list
  # used below — confirm this is intended.
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      basename, extension = os.path.splitext(source)
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']
  # Recursively walk the filter tree, disabling PCH use (and inherited
  # forced includes) for sources of the excluded extensions.
  def DisableForSourceTree(source_tree):
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])
  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Queue each 'action' in the spec as a custom build step.

  Actions with no inputs are attached to the gyp file itself so they have
  a file in the project to hang off of.
  """
  # Don't setup_env every time. When all the actions are run together in one
  # batch file in VS, the PATH will grow too long. Membership in this set
  # means that the cygwin environment has been set up for that file, and
  # does not need to be set up again.
  files_with_env_setup = set()
  for action in spec.get('actions', []):
    action_inputs = action.get('inputs') or [relative_path_of_gyp_file]
    anchor_file = action_inputs[0]
    needs_env_setup = anchor_file not in files_with_env_setup
    command = _BuildCommandLineForRule(spec, action, has_input_path=False,
                                       do_setup_env=needs_env_setup)
    files_with_env_setup.add(anchor_file)
    _AddActionStep(actions_to_add,
                   inputs=action_inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=command)
def _WriteMSVSUserFile(project_path, version, spec):
  """Write the .user file with debug/run settings, if the spec needs one.

  'run_as' takes precedence over 'test'; when neither is present no file
  is written.
  """
  if 'run_as' in spec:
    run_as = spec['run_as']
    debug_settings = (run_as.get('action', []),
                      run_as.get('environment', []),
                      run_as.get('working_directory', '.'))
  elif int(spec.get('test', 0)):
    # Test targets default to running the built binary under gtest timing.
    debug_settings = (['$(TargetPath)', '--gtest_print_time'], [], '.')
  else:
    return  # Nothing to add
  action, environment, working_directory = debug_settings
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  # items() instead of Python-2-only iteritems(): identical behavior on
  # Python 2, and keeps the function portable to Python 3.
  for config_name, c_data in spec['configurations'].items():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Queue an action step for every copy described in the spec."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Return (inputs, outputs, command, description) tuples for the spec's
  'copies' entries."""
  copies = []
  for cpy in spec.get('copies', []):
    destination = cpy['destination']
    for src in cpy.get('files', []):
      dst = os.path.join(destination, os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # Directory copy: xcopy the tree from inside its parent directory.
        src_bare = src[:-1]
        base_dir, outer_dir = posixpath.split(src_bare)
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single file: mkdir may fail if the directory already exists, so
        # reset ERRORLEVEL before the copy itself runs.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(destination), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Arrange project objects into a folder hierarchy for the solution.

  Arguments:
    sln_projects: List of qualified targets in the solution.
    project_objects: Dict of project objects keyed by qualified target.
    flat: When true, produce a flat list instead of nested folders.
  Returns:
    A list of solution entries (folders and/or projects).
  """
  root = {}
  # Convert into a tree of dicts on path.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    gyp_dir = os.path.dirname(gyp_file)
    path_dict = _GetPathDict(root, gyp_dir)
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one entry.
  # In practice, this strips the top-level "src/" dir from the hierarchy in
  # the solution.
  # Fix: root.keys()[0] was Python-2-only (dict views are not subscriptable
  # in Python 3); next(iter(...)) is equivalent on both.
  while len(root) == 1:
    only_entry = next(iter(root.values()))
    if type(only_entry) != dict:
      break
    root = only_entry
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Compute the project file path and fixpath prefix for a target.

  Returns:
    (proj_path, fix_prefix) where fix_prefix is None unless output is
    redirected via options.generator_output.
  """
  default_config = _GetDefaultConfiguration(spec)
  # Reuse an existing vcproj when the spec names one; otherwise derive the
  # filename from the target name and the version's project extension.
  proj_filename = default_config.get('msvs_existing_vcproj') or (
      spec['target_name'] + options.suffix + msvs_version.ProjectExtension())
  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
  fix_prefix = None
  if options.generator_output:
    # Paths written into the project must be rewritten relative to its
    # redirected location.
    project_dir_path = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(project_dir_path,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
  """Return a dict mapping solution configuration names to the project
  configuration names to build for them.

  A target may override its platform per configuration through
  'msvs_target_platform'; the solution-side name keeps the base name with
  the original platform so VS can pair them.
  """
  # Prepare a dict indicating which project configurations are used for which
  # solution configurations for this target.
  config_platform_overrides = {}
  # items() instead of Python-2-only iteritems(): identical behavior on
  # Python 2, and keeps the function portable to Python 3.
  for config_name, c in spec['configurations'].items():
    config_fullname = _ConfigFullName(config_name, c)
    platform = c.get('msvs_target_platform', _ConfigPlatform(c))
    fixed_config_fullname = '%s|%s' % (
        _ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
    config_platform_overrides[config_fullname] = fixed_config_fullname
  return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.
  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  Raises:
    GypError: if a target uses a toolset other than 'target'.
  """
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    # fixpath_prefix is a module-level global consumed elsewhere while this
    # project's paths are being computed.
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(params, target_list, target_dicts):
  """Initialize targets for the ninja flavor.
  This sets up the necessary variables in the targets to generate msvs projects
  that use ninja as an external builder. The variables in the spec are only set
  if they have not been set. This allows individual specs to override the
  default values initialized here.
  Arguments:
    params: Params provided to the generator.
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  """
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec.get('msvs_external_builder'):
      # The spec explicitly defined an external builder, so don't change it.
      continue
    path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
    spec['msvs_external_builder'] = 'ninja'
    if not spec.get('msvs_external_builder_out_dir'):
      gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
      gyp_dir = os.path.dirname(gyp_file)
      # x64 builds get their own output directory suffix.
      configuration = '$(Configuration)'
      if params.get('target_arch') == 'x64':
        configuration += '_x64'
      spec['msvs_external_builder_out_dir'] = os.path.join(
          gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
          ninja_generator.ComputeOutputDir(params),
          configuration)
    if not spec.get('msvs_external_builder_build_cmd'):
      spec['msvs_external_builder_build_cmd'] = [
          path_to_ninja, '-C', '$(OutDir)', '$(ProjectName)']
    if not spec.get('msvs_external_builder_clean_cmd'):
      spec['msvs_external_builder_clean_cmd'] = [
          path_to_ninja, '-C', '$(OutDir)', '-tclean', '$(ProjectName)']
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})
  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version
  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()
  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  wow64_arch = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  if '64' in arch or '64' in wow64_arch:
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32
  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
  """Build every generated .sln with devenv, one configuration at a time.

  Fixes: iterated data.iteritems() but never used the values (and iteritems
  is Python-2-only); used the Python-2-only print statement; bound the
  check_call result to an unused variable.
  """
  options = params['options']
  msvs_version = params['msvs_version']
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
  for build_file in data:
    build_file_root, build_file_ext = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    for config in configurations:
      arguments = [devenv, sln_path, '/Build', config]
      # Single-argument print() is valid on both Python 2 and 3.
      print('Building [%s]: %s' % (config, arguments))
      # check_call raises on a non-zero exit, so the return value is unused.
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.
  This is the entry point for this generator.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: Dict of generator parameters (options, msvs_version, flags...).
  Raises:
    GypError: if sources are missing and msvs_error_on_missing_sources is
        set in the generator flags.
  """
  global fixpath_prefix
  options = params['options']
  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']
  generator_flags = params.get('generator_flags', {})
  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)
  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(params, target_list, target_dicts)
  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)
  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)
  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    # The module-level fixpath_prefix is consumed while this project is
    # being written; reset it afterwards.
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None
  # Emit one solution per .gyp build file.
  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()
  # Missing sources either abort the build or produce a warning, depending
  # on the msvs_error_on_missing_sources generator flag.
  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                rule_dependencies, extension_to_rule_name):
  """Generate the filters file.
  This file is used by Visual Studio to organize the presentation of source
  files into folders.
  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    rule_dependencies: Extra files that rules depend on.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, rule_dependencies,
                           extension_to_rule_name, filter_group, source_group)
  if not filter_group:
    # We don't need this filter anymore. Delete any old filter file.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.
  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    rule_dependencies: Extra files that rules depend on.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # We have a sub-filter. Create the name of that sub-filter.
      if not parent_filter_name:
        filter_name = source.name
      else:
        filter_name = '%s\\%s' % (parent_filter_name, source.name)
      # Add the filter to the group.
      filter_group.append(
          ['Filter', {'Include': filter_name},
           ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
      # Recurse and add its dependents.
      _AppendFiltersForMSBuild(filter_name, source.contents,
                               rule_dependencies, extension_to_rule_name,
                               filter_group, source_group)
    else:
      # It's a source. Create a source entry.
      _, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                               extension_to_rule_name)
      source_entry = [element, {'Include': source}]
      # Specify the filter it is part of, if any.
      if parent_filter_name:
        source_entry.append(['Filter', parent_filter_name])
      source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.asm':
group = 'masm'
element = 'MASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name):
  """Generate the MSBuild implementation of the spec's rules.

  Native rules become a .props/.targets/.xml file trio written under
  output_dir; external rules are delegated to _GenerateExternalRules.
  Updates rule_dependencies and extension_to_rule_name in place, and
  adjusts sources/excluded_sources for the rules via
  _AdjustSourcesForRules.
  """
  # MSBuild rules are implemented using three files: an XML file, a .targets
  # file and a .props file.
  # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  # for more details.
  rules = spec.get('rules', [])
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
  msbuild_rules = []
  for rule in rules_native:
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    # All three rule files share the target's base name.
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    xml_name = base + '.xml'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    props_path = os.path.join(output_dir, props_name)
    targets_path = os.path.join(output_dir, targets_name)
    xml_path = os.path.join(output_dir, xml_name)
    _GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
    _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(rules, sources, excluded_sources, True)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.
  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """
  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # Assure that the rule name is only characters and numbers
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion. I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.inputs = self.rule_name + '_inputs'
    self.tlog = self.rule_name + '_tlog'
    # Normalize the extension to always carry a leading dot.
    self.extension = rule['extension']
    if not self.extension.startswith('.'):
      self.extension = '.' + self.extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    # Inputs/outputs are path-fixed and VC macros converted to their
    # MSBuild equivalents, then joined with ';' as MSBuild expects.
    old_additional_dependencies = _FixPaths(rule.get('inputs', []))
    self.additional_dependencies = (
        ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                  for i in old_additional_dependencies]))
    old_outputs = _FixPaths(rule.get('outputs', []))
    self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
                             for i in old_outputs])
    old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                           do_setup_env=True)
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file."""
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        # Default the rule to run between Midl and CustomBuild unless the
        # project sets explicit Before/After targets.
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
                       "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                                 rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        # The rule target also waits on the IDE's selected-files target.
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        # Per-item defaults: command line, outputs, description and deps.
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file.

  Wires each rule into the build: a target that runs the rule's command
  over its selected files, plus a Compute<Rule>Output target that declares
  outputs to the linker/librarian and creates output directories.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  # Each rule's task is built from the companion .xml file by XamlTaskFactory.
  for rule in msbuild_rules:
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When files are selected in the IDE, restrict the rule to just them.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
    ]
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
    ]
    # Tracker-log (.tlog) items make incremental builds of rule outputs work.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
    ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
    ]
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
    ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
    ]
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
                      "'true'" % (rule_name, rule_name),
         'EchoOff': 'true',
         'StandardOutputImportance': 'High',
         'StandardErrorImportance': 'High',
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
    ]
    content.extend([
        # The rule target itself: runs the command over the selected files.
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        # Hook the compute-output target into link/lib input computation.
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        # Declares rule outputs to Link/Lib/ImpLib and makes the output dirs.
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
      ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Writes the .xml file describing the UI for a set of custom build rules.

  The ProjectSchemaDefinitions document tells Visual Studio how to present
  each custom rule: the property pages shown in the UI, the item type the
  rule creates, and the file-extension -> content-type binding.

  Arguments:
    xml_path: path of the .xml file to write.
    msbuild_rules: the MSBuildRule objects to describe.
  """
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    content.extend([
        # The Rule element defines the property pages shown for this rule.
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         # Inputs come from the project items of this rule's item type.
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         # Drop-down letting the user pick which target to run before.
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         # Matching drop-down for the target to run after.
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        # Bind the rule's item type, file extension and content type together.
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Return the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return r"'$(Configuration)|$(Platform)'=='%s|%s'" % (configuration, platform)
def _GetMSBuildProjectConfigurations(configurations):
  """Build the ProjectConfigurations ItemGroup for the .vcxproj."""
  item_group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  for name, settings in sorted(configurations.iteritems()):
    config, plat = _GetConfigurationAndPlatform(name, settings)
    entry = ['ProjectConfiguration',
             {'Include': '%s|%s' % (config, plat)},
             ['Configuration', config],
             ['Platform', plat]]
    item_group.append(entry)
  return [item_group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
  """Build the 'Globals' PropertyGroup for the project.

  Arguments:
    spec: the gyp target dict.
    guid: the project GUID string.
    gyp_file_name: name of the originating .gyp file (used for RootNamespace).
  Returns:
    A list of easy_xml specs containing one PropertyGroup.
  """
  namespace = os.path.splitext(gyp_file_name)[0]
  properties = [
      ['PropertyGroup', {'Label': 'Globals'},
       ['ProjectGuid', guid],
       ['Keyword', 'Win32Proj'],
       ['RootNamespace', namespace],
       ['IgnoreWarnCompileDuplicatedFilename', 'true'],
      ]
  ]

  # Prefer 64-bit tools on 64-bit hosts; check both the native variable and
  # the WOW64 one (set when a 32-bit process runs on a 64-bit OS).
  if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
     os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
    properties[0].append(['PreferredToolArchitecture', 'x64'])

  if spec.get('msvs_enable_winrt'):
    # WinRT (Windows Store / Windows Phone) projects need extra globals.
    properties[0].append(['DefaultLanguage', 'en-US'])
    properties[0].append(['AppContainerApplication', 'true'])
    if spec.get('msvs_application_type_revision'):
      app_type_revision = spec.get('msvs_application_type_revision')
      properties[0].append(['ApplicationTypeRevision', app_type_revision])
    else:
      properties[0].append(['ApplicationTypeRevision', '8.1'])
    if spec.get('msvs_target_platform_version'):
      target_platform_version = spec.get('msvs_target_platform_version')
      properties[0].append(['WindowsTargetPlatformVersion',
                            target_platform_version])
      if spec.get('msvs_target_platform_minversion'):
        target_platform_minversion = spec.get('msvs_target_platform_minversion')
        properties[0].append(['WindowsTargetPlatformMinVersion',
                              target_platform_minversion])
      else:
        # Default the minimum version to the target version.
        properties[0].append(['WindowsTargetPlatformMinVersion',
                              target_platform_version])
    if spec.get('msvs_enable_winphone'):
      properties[0].append(['ApplicationType', 'Windows Phone'])
    else:
      properties[0].append(['ApplicationType', 'Windows Store'])

  # Take the platform / SDK version from the first configuration that
  # defines them; stop scanning as soon as both are known.
  platform_name = None
  msvs_windows_target_platform_version = None
  for configuration in spec['configurations'].itervalues():
    platform_name = platform_name or _ConfigPlatform(configuration)
    msvs_windows_target_platform_version = \
        msvs_windows_target_platform_version or \
        _ConfigWindowsTargetPlatformVersion(configuration)
    if platform_name and msvs_windows_target_platform_version:
      break

  if platform_name == 'ARM':
    properties[0].append(['WindowsSDKDesktopARMSupport', 'true'])
  if msvs_windows_target_platform_version:
    properties[0].append(['WindowsTargetPlatformVersion', \
        str(msvs_windows_target_platform_version)])

  return properties
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Collect per-configuration ConfigurationType / CharacterSet properties."""
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            attributes['ConfigurationType'])
    character_set = attributes.get('CharacterSet')
    # WinRT projects must not have a CharacterSet forced on them.
    if character_set and 'msvs_enable_winrt' not in spec:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Return the ImportGroup sections importing user property sheets.

  When no configuration specifies 'msbuild_props', a single unconditional
  ImportGroup importing the per-user .props file is emitted.  Otherwise one
  conditional group is emitted per configuration, each importing the user
  .props plus any configuration-specific sheets.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    # Membership test instead of the deprecated dict.has_key().
    if 'msbuild_props' in settings:
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''

  if not props_specified:
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
          'ImportGroup',
          {'Label': 'PropertySheets',
           'Condition': condition
          },
          ['Import',
           {'Project': user_props,
            'Condition': "exists('%s')" % user_props,
            'Label': 'LocalAppDataPlatform'
           }
          ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project':props_file}])
      sheets.append(import_group)
    return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
  """Convert MSVS configuration attributes to their MSBuild equivalents.

  Directory attributes get a guaranteed trailing backslash; CharacterSet and
  ConfigurationType values are mapped from their numeric MSVS encodings.
  Unknown attributes produce a warning and are dropped.
  """
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      # MSBuild requires directory properties to end with a backslash.
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      # Parenthesized so this line is valid under both Python 2 and 3.
      print('Warning: Do not know how to convert MSVS attribute ' + a)
  return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Compute the MSBuild configuration attributes for one configuration.

  Returns a dict with keys such as ConfigurationType, OutputDirectory,
  IntermediateDirectory, TargetName, TargetExt and TargetPath.
  """
  if 'msbuild_configuration_attributes' not in config:
    # No explicit MSBuild attributes: derive them from the MSVS-style ones.
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    output_dir = msbuild_attributes.get('OutputDirectory',
                                        '$(SolutionDir)$(Configuration)')
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
          msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
    ext = spec.get('product_extension')
    msbuild_attributes['TargetExt'] = '.' + ext

  # An external builder overrides the output directory unconditionally.
  if spec.get('msvs_external_builder'):
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'

  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext

  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Build the per-configuration global PropertyGroup (IntDir, OutDir, ...).

  Registers each attribute with its configuration condition so that
  _GetMSBuildPropertyGroup can collapse values shared by every
  configuration into a single unconditional entry.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)

  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    # Fix: TargetExt was previously registered twice per configuration,
    # which recorded every condition twice and caused redundant conditional
    # entries in the generated project; register it exactly once.
    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])
    if attributes.get('TargetExt'):
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])
    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for msvs variable references ( i.e. $(FOO) ); group 1 captures the
# bare variable name, used when building the property dependency graph.
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Emit one ItemDefinitionGroup per configuration with its tool settings."""
  groups = []
  for config_name, configuration in sorted(configurations.iteritems()):
    msbuild_settings = configuration['finalized_msbuild_settings']
    condition = _GetConfigurationCondition(config_name, configuration)
    group = ['ItemDefinitionGroup', {'Condition': condition}]
    for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
      # The tool named '' holds global settings handled by
      # _GetMSBuildConfigurationGlobalProperties; skip it (and empty tools).
      if not tool_name or not tool_settings:
        continue
      tool = [tool_name]
      for setting, value in sorted(tool_settings.iteritems()):
        tool.append(
            [setting, _GetValueFormattedForMSBuild(tool_name, setting, value)])
      group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Compute and store the finalized MSBuild settings for a configuration.

  Converts MSVS-style settings when no native MSBuild settings exist, then
  merges include directories, libraries, defines, precompiled-header setup
  and pre/post-build steps, storing the result in
  configuration['finalized_msbuild_settings'].
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, midl_include_dirs, resource_include_dirs = \
      _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))

  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  prebuild = configuration.get('msvs_prebuild')
  postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')

  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'Midl',
              'AdditionalIncludeDirectories', midl_include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the enviroment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                   libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  else:
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
  # Turn off WinRT compilation
  _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
  # Turn on import libraries if appropriate
  if spec.get('msvs_requires_importlibrary'):
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  # The dict is stored by reference, so the event-command appends below are
  # still visible through configuration['finalized_msbuild_settings'].
  configuration['finalized_msbuild_settings'] = msbuild_settings
  if prebuild:
    _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
  if postbuild:
    _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Format a setting value for MSBuild: lists joined, VC macros converted.

  Arguments:
    tool_name: the MSBuild tool the setting belongs to ('' for globals).
    name: the setting name.
    value: a string, or a list of strings to be joined.
  Returns:
    The formatted string value.
  """
  # isinstance instead of 'type(value) == list' (also accepts subclasses).
  if isinstance(value, list):
    # For some settings, VS2010 does not automatically extends the settings
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      # NOTE: this appends the inherit marker to the caller's list in place.
      value.append('%%(%s)' % name)
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space.  Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if tool_name in exceptions and name in exceptions[tool_name]:
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no otherwise
  visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      missing += _VerifySourcesExist(source.contents, root_dir)
    elif '$' not in source:
      # Paths containing build variables cannot be checked statically.
      full_path = os.path.join(root_dir, source)
      if not os.path.exists(full_path):
        missing.append(full_path)
  return missing
def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
                       extension_to_rule_name, actions_spec,
                       sources_handled_by_action, list_excluded):
  """Partition sources by MSBuild item type, one ItemGroup per type."""
  groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
            'rule_dependency']
  grouped_sources = dict((g, []) for g in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               rule_dependencies, extension_to_rule_name,
               sources_handled_by_action, list_excluded)
  # Keep the canonical group order; omit empty groups entirely.
  item_groups = [['ItemGroup'] + grouped_sources[g]
                 for g in groups if grouped_sources[g]]
  if actions_spec:
    item_groups.append(['ItemGroup'] + actions_spec)
  return item_groups
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 rule_dependencies, extension_to_rule_name,
                 sources_handled_by_action,
                 list_excluded):
  """Recursively sort sources into grouped_sources by MSBuild item type.

  Filter nodes are recursed into; plain files not already handled by an
  action get per-configuration ExcludedFromBuild and PrecompiledHeader
  details attached before being appended to their group.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   rule_dependencies, extension_to_rule_name,
                   sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded everywhere: one unconditional entry suffices.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']

            if precompiled_source == source:
              # This file creates the precompiled header for this config.
              condition = _GetConfigurationCondition(config_name, configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])

        group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Build the ProjectReference ItemGroup for the project's dependencies."""
  if not project.dependencies:
    return []
  # The project directory is the same for every dependency; compute it once.
  project_dir = os.path.split(project.path)[0]
  group = ['ItemGroup']
  for dep in project.dependencies:
    rel_path = gyp.common.RelativePath(dep.path, project_dir)
    ref = ['ProjectReference',
           {'Include': rel_path},
           ['Project', dep.guid],
           ['ReferenceOutputAssembly', 'false']
          ]
    for config in dep.spec.get('configurations', {}).itervalues():
      if config.get('msvs_use_library_dependency_inputs', 0):
        ref.append(['UseLibraryDependencyInputs', 'true'])
        break
      # If it's disabled in any config, turn it off in the reference.
      if config.get('msvs_2010_disable_uldi_when_referenced', 0):
        ref.append(['UseLibraryDependencyInputs', 'false'])
        break
    group.append(ref)
  return [group]
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generate the .vcxproj (and .filters) file for one MSBuild project.

  Arguments:
    project: the MSVSNew project object to generate.
    options: global generator options.
    version: the Visual Studio version object.
    generator_flags: dict of generator-specific flags.
  Returns:
    The list of source files that could not be found on disk.
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  gyp.common.EnsureDirExists(project.path)
  # Prepare list of sources and excluded sources.
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)

  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  rule_dependencies = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)

  # Don't generate rules if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _GenerateRulesForMSBuild(project_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, rule_dependencies,
                             extension_to_rule_name)
  else:
    rules = spec.get('rules', [])
    _AdjustSourcesForRules(rules, sources, excluded_sources, True)

  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded, version))

  # Don't add actions if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _AddActions(actions_to_add, spec, project.build_file)
    _AddCopies(actions_to_add, spec)

    # NOTE: this stanza must appear after all actions have been decided.
    # Don't excluded sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)

  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)

  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              rule_dependencies,
                              extension_to_rule_name)
  missing_sources = _VerifySourcesExist(sources, project_dir)

  for configuration in configurations.itervalues():
    _FinalizeMSBuildSettings(spec, configuration)

  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  import_masm_props_section = [
      ['Import',
       {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
  import_masm_targets_section = [
      ['Import',
       {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]

  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]

  # NOTE: the order of these sections mirrors the layout MSBuild expects
  # in a .vcxproj; do not reorder casually.
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  if spec.get('msvs_enable_winphone'):
    content += _GetMSBuildLocalProperties('v120_wp81')
  else:
    content += _GetMSBuildLocalProperties(project.msbuild_toolset)
  content += import_cpp_props_section
  content += import_masm_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
      actions_spec, sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += import_masm_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)

  if spec.get('msvs_external_builder'):
    content += _GetMSBuildExternalBuilderTargets(spec)

  # TODO(jeanluc) File a bug to get rid of runas.  We had in MSVS:
  # has_run_as = _WriteMSVSUserFile(project.path, version, spec)

  easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)

  return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
  """Return a list of MSBuild targets for external builders.

  The "Build" and "Clean" targets are always generated.  If the spec contains
  'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
  be generated, to support building selected C/C++ files.

  Arguments:
    spec: The gyp target spec.
  Returns:
    List of MSBuild 'Target' specs.
  """
  def _MakeTarget(target_name, cmd_key):
    # One Target element wrapping a single Exec task for the command line.
    cmd = _BuildCommandLineForRuleRaw(
        spec, spec[cmd_key], False, False, False, False)
    return ['Target', {'Name': target_name}, ['Exec', {'Command': cmd}]]

  targets = [_MakeTarget('Build', 'msvs_external_builder_build_cmd'),
             _MakeTarget('Clean', 'msvs_external_builder_clean_cmd')]
  if spec.get('msvs_external_builder_clcompile_cmd'):
    targets.append(
        _MakeTarget('ClCompile', 'msvs_external_builder_clcompile_cmd'))
  return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.

  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = OrderedSet()
  actions_spec = []
  # Merge every action that shares the same primary input into one combined
  # command sequence, accumulating their inputs, outputs and descriptions.
  for primary_input, actions in actions_to_add.iteritems():
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Add the custom build action for one input file.
    description = ', and also '.join(descriptions)

    # We can't join the commands simply with && because the command line will
    # get too long. See also _AddActions: cygwin's setup_env mustn't be called
    # for every invocation or the command that sets the PATH will grow too
    # long.
    # Each command aborts the sequence on failure by testing %errorlevel%.
    command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
                           for c in commands])
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Append a CustomBuild element for one merged action to actions_spec.

  Also records the fixed-up primary input in sources_handled_by_action so the
  caller can exclude it from regular source handling.
  """
  fixed_primary = _FixPath(primary_input)
  sources_handled_by_action.add(fixed_primary)
  # Secondary inputs are all fixed-up inputs other than the primary one.
  extra_inputs = ';'.join(
      [path for path in _FixPaths(inputs) if path != fixed_primary])
  joined_outputs = ';'.join(_FixPaths(outputs))
  element = ['CustomBuild', {'Include': fixed_primary},
             # TODO(jeanluc) 'Document' for all or just if as_sources?
             ['FileType', 'Document'],
             ['Command', MSVSSettings.ConvertVCMacrosToMSBuild(cmd)],
             ['Message', description],
             ['Outputs', joined_outputs]]
  if extra_inputs:
    element.append(['AdditionalInputs', extra_inputs])
  actions_spec.append(element)
| gpl-3.0 |
faroit/mir_eval | tests/test_display.py | 4 | 11202 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Unit tests for the display module'''
# For testing purposes, clobber the rcfile
import matplotlib
matplotlib.use('Agg') # nopep8
import matplotlib.pyplot as plt
import numpy as np
# Import the hacked image comparison module
from mpl_ic import image_comparison
from nose.tools import raises
# We'll make a decorator to handle style contexts
from decorator import decorator
import mir_eval
import mir_eval.display
from mir_eval.io import load_labeled_intervals
from mir_eval.io import load_valued_intervals
from mir_eval.io import load_labeled_events
from mir_eval.io import load_ragged_time_series
from mir_eval.io import load_wav
@decorator
def styled(f, *args, **kwargs):
    # Reset matplotlib to its default rc settings before each test so the
    # rendered output does not depend on the user's matplotlibrc.
    matplotlib.rcdefaults()
    return f(*args, **kwargs)
@image_comparison(baseline_images=['segment'], extensions=['png'])
@styled
def test_display_segment():
    """Segments rendered without in-plot text, checked against segment.png."""
    plt.figure()
    # Load some segment data
    intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
    # Plot the segments with no labels
    mir_eval.display.segments(intervals, labels, text=False)
    # Draw a legend
    plt.legend()
@image_comparison(baseline_images=['segment_text'], extensions=['png'])
@styled
def test_display_segment_text():
    """Segments rendered with in-plot text annotations enabled."""
    plt.figure()
    # Load some segment data
    intervals, labels = load_labeled_intervals('data/segment/ref00.lab')
    # Plot the segments with text labels drawn inside the plot
    mir_eval.display.segments(intervals, labels, text=True)
@image_comparison(baseline_images=['labeled_intervals'], extensions=['png'])
@styled
def test_display_labeled_intervals():
    """labeled_intervals with default options on a single annotation."""
    plt.figure()
    # Load some chord data
    intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
    # Plot the chords with nothing fancy
    mir_eval.display.labeled_intervals(intervals, labels)
@image_comparison(baseline_images=['labeled_intervals_noextend'],
                  extensions=['png'])
@styled
def test_display_labeled_intervals_noextend():
    """labeled_intervals with an empty label set and label extension off."""
    plt.figure()
    # Load some chord data
    intervals, labels = load_labeled_intervals('data/chord/ref01.lab')
    # Plot onto an axes with no y tick labels, and do not let the display
    # extend the (empty) label set with labels from the data
    ax = plt.axes()
    ax.set_yticklabels([])
    mir_eval.display.labeled_intervals(intervals, labels,
                                       label_set=[],
                                       extend_labels=False,
                                       ax=ax)
@image_comparison(baseline_images=['labeled_intervals_compare'],
                  extensions=['png'])
@styled
def test_display_labeled_intervals_compare():
    """Overlay reference and estimate annotations with label-set extension."""
    plt.figure()
    # Load some chord data
    ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
    est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')

    # Plot reference and estimates using label set extension
    mir_eval.display.labeled_intervals(ref_int, ref_labels,
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(est_int, est_labels,
                                       alpha=0.5, label='Estimate')

    plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_noextend'],
                  extensions=['png'])
@styled
def test_display_labeled_intervals_compare_noextend():
    """Overlay two annotations, restricting the estimate to reference labels."""
    plt.figure()
    # Load some chord data
    ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
    est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')

    # Plot reference and estimate, but only use the reference labels
    mir_eval.display.labeled_intervals(ref_int, ref_labels,
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(est_int, est_labels,
                                       extend_labels=False,
                                       alpha=0.5, label='Estimate')

    plt.legend()
@image_comparison(baseline_images=['labeled_intervals_compare_common'],
                  extensions=['png'])
@styled
def test_display_labeled_intervals_compare_common():
    """Overlay two annotations sharing one explicit, sorted label set."""
    plt.figure()
    # Load some chord data
    ref_int, ref_labels = load_labeled_intervals('data/chord/ref01.lab')
    est_int, est_labels = load_labeled_intervals('data/chord/est01.lab')

    # The union of labels from both annotations, sorted for a stable axis
    label_set = list(sorted(set(ref_labels) | set(est_labels)))

    # Plot reference and estimate with a common label set
    mir_eval.display.labeled_intervals(ref_int, ref_labels,
                                       label_set=label_set,
                                       alpha=0.5, label='Reference')
    mir_eval.display.labeled_intervals(est_int, est_labels,
                                       label_set=label_set,
                                       alpha=0.5, label='Estimate')

    plt.legend()
@image_comparison(baseline_images=['hierarchy_nolabel'], extensions=['png'])
@styled
def test_display_hierarchy_nolabel():
    """Two-level segmentation hierarchy without custom level names."""
    plt.figure()
    # Load two levels of the same hierarchical annotation
    int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
    int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')

    # Plot the hierarchy with default level naming
    mir_eval.display.hierarchy([int0, int1],
                               [lab0, lab1])

    plt.legend()
@image_comparison(baseline_images=['hierarchy_label'], extensions=['png'])
@styled
def test_display_hierarchy_label():
    """Two-level segmentation hierarchy with explicit level names."""
    plt.figure()
    # Load two levels of the same hierarchical annotation
    int0, lab0 = load_labeled_intervals('data/hierarchy/ref00.lab')
    int1, lab1 = load_labeled_intervals('data/hierarchy/ref01.lab')

    # Plot the hierarchy, naming the levels explicitly
    mir_eval.display.hierarchy([int0, int1],
                               [lab0, lab1],
                               levels=['Large', 'Small'])

    plt.legend()
@image_comparison(baseline_images=['pitch_hz'], extensions=['png'])
@styled
def test_pitch_hz():
    """Reference and estimated melody contours on a Hz axis, with unvoicing."""
    plt.figure()
    ref_times, ref_freqs = load_labeled_events('data/melody/ref00.txt')
    est_times, est_freqs = load_labeled_events('data/melody/est00.txt')

    # Plot pitches on a Hz scale
    mir_eval.display.pitch(ref_times, ref_freqs, unvoiced=True,
                           label='Reference')
    mir_eval.display.pitch(est_times, est_freqs, unvoiced=True,
                           label='Estimate')
    plt.legend()
@image_comparison(baseline_images=['pitch_midi'], extensions=['png'])
@styled
def test_pitch_midi():
    """Melody contour on a MIDI axis with note-name tick labels."""
    plt.figure()
    times, freqs = load_labeled_events('data/melody/ref00.txt')

    # Plot pitches on a midi scale with note tickers
    mir_eval.display.pitch(times, freqs, midi=True)
    mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['pitch_midi_hz'], extensions=['png'])
@styled
def test_pitch_midi_hz():
    """Melody contour on a MIDI axis with Hz-valued tick labels."""
    plt.figure()
    times, freqs = load_labeled_events('data/melody/ref00.txt')

    # Plot pitches on a midi scale, but tick the axis in Hz
    mir_eval.display.pitch(times, freqs, midi=True)
    mir_eval.display.ticker_pitch()
@image_comparison(baseline_images=['multipitch_hz_unvoiced'],
                  extensions=['png'])
@styled
def test_multipitch_hz_unvoiced():
    """Multi-f0 display on a Hz axis, including unvoiced frames."""
    plt.figure()
    times, pitches = load_ragged_time_series('data/multipitch/est01.txt')

    # Plot all observed pitches, showing unvoiced frames as well
    mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=True)
@image_comparison(baseline_images=['multipitch_hz_voiced'], extensions=['png'])
@styled
def test_multipitch_hz_voiced():
    """Multi-f0 display on a Hz axis, voiced frames only."""
    plt.figure()
    times, pitches = load_ragged_time_series('data/multipitch/est01.txt')

    mir_eval.display.multipitch(times, pitches, midi=False, unvoiced=False)
@image_comparison(baseline_images=['multipitch_midi'], extensions=['png'])
@styled
def test_multipitch_midi():
    """Overlaid reference/estimate multi-f0 displays on a MIDI axis."""
    plt.figure()
    ref_t, ref_p = load_ragged_time_series('data/multipitch/ref01.txt')
    est_t, est_p = load_ragged_time_series('data/multipitch/est01.txt')

    # Plot pitches on a midi scale with note tickers
    mir_eval.display.multipitch(ref_t, ref_p, midi=True,
                                alpha=0.5, label='Reference')
    mir_eval.display.multipitch(est_t, est_p, midi=True,
                                alpha=0.5, label='Estimate')

    plt.legend()
@image_comparison(baseline_images=['piano_roll'], extensions=['png'])
@styled
def test_pianoroll():
    """Overlaid reference/estimate piano rolls from Hz-valued intervals."""
    plt.figure()
    ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
    est_t, est_p = load_valued_intervals('data/transcription/est04.txt')

    mir_eval.display.piano_roll(ref_t, ref_p,
                                label='Reference', alpha=0.5)
    mir_eval.display.piano_roll(est_t, est_p,
                                label='Estimate', alpha=0.5, facecolor='r')

    plt.legend()
@image_comparison(baseline_images=['piano_roll_midi'], extensions=['png'])
@styled
def test_pianoroll_midi():
    """Piano rolls built from pitches passed via the midi= keyword."""
    plt.figure()
    ref_t, ref_p = load_valued_intervals('data/transcription/ref04.txt')
    est_t, est_p = load_valued_intervals('data/transcription/est04.txt')

    # Convert Hz pitches to MIDI numbers before display
    ref_midi = mir_eval.util.hz_to_midi(ref_p)
    est_midi = mir_eval.util.hz_to_midi(est_p)
    mir_eval.display.piano_roll(ref_t, midi=ref_midi,
                                label='Reference', alpha=0.5)
    mir_eval.display.piano_roll(est_t, midi=est_midi,
                                label='Estimate', alpha=0.5, facecolor='r')

    plt.legend()
@image_comparison(baseline_images=['ticker_midi_zoom'], extensions=['png'])
@styled
def test_ticker_midi_zoom():
    """Note-name tickers applied to a small (zoomed) value range."""
    plt.figure()

    plt.plot(np.arange(3))
    mir_eval.display.ticker_notes()
@image_comparison(baseline_images=['separation'], extensions=['png'])
@styled
def test_separation():
    """Source-separation spectrogram display for three sources, no labels."""
    plt.figure()
    x0, fs = load_wav('data/separation/ref05/0.wav')
    x1, fs = load_wav('data/separation/ref05/1.wav')
    x2, fs = load_wav('data/separation/ref05/2.wav')

    mir_eval.display.separation([x0, x1, x2], fs=fs)
@image_comparison(baseline_images=['separation_label'], extensions=['png'])
@styled
def test_separation_label():
    """Source-separation display with per-source legend labels."""
    plt.figure()
    x0, fs = load_wav('data/separation/ref05/0.wav')
    x1, fs = load_wav('data/separation/ref05/1.wav')
    x2, fs = load_wav('data/separation/ref05/2.wav')

    mir_eval.display.separation([x0, x1, x2], fs=fs,
                                labels=['Alice', 'Bob', 'Carol'])

    plt.legend()
@image_comparison(baseline_images=['events'], extensions=['png'])
@styled
def test_events():
    """Reference and estimated beat events drawn as vertical markers."""
    plt.figure()

    # Load some event data; truncate to keep the plot readable
    beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:30]
    beats_est = mir_eval.io.load_events('data/beat/est00.txt')[:30]

    # Plot both with labels
    mir_eval.display.events(beats_ref, label='reference')
    mir_eval.display.events(beats_est, label='estimate')
    plt.legend()
@image_comparison(baseline_images=['labeled_events'], extensions=['png'])
@styled
def test_labeled_events():
    """Beat events rendered with per-event text labels."""
    plt.figure()

    # Load some event data; only the first 10 events are drawn
    beats_ref = mir_eval.io.load_events('data/beat/ref00.txt')[:10]

    labels = list('abcdefghijklmnop')
    # Plot events with one label per event (extra labels are unused)
    mir_eval.display.events(beats_ref, labels)
@raises(ValueError)
def test_pianoroll_nopitch_nomidi():
    # Regression test for issue 214: piano_roll must raise ValueError when
    # called with intervals but neither pitches nor midi values.
    mir_eval.display.piano_roll([[0, 1]])
| mit |
chromium/chromium | tools/site_compare/commands/timeload.py | 189 | 4936 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare command to time page loads
Loads a series of URLs in a series of browsers (and browser versions)
and measures how long the page takes to load in each. Outputs a
comma-delimited file. The first line is "URL,[browser names", each
additional line is a URL follored by comma-delimited times (in seconds),
or the string "timeout" or "crashed".
"""
import os # Functions for walking the directory tree
import tempfile # Get a temporary directory to hold intermediates
import command_line
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
def CreateCommand(cmdline):
  """Inserts the command and arguments into a command line for parsing.

  Arguments:
    cmdline: the command_line object on which the "timeload" command and its
        arguments, dependencies and exclusions are registered.
  """
  cmd = cmdline.AddCommand(
    ["timeload"],
    "Measures how long a series of URLs takes to load in one or more browsers.",
    None,
    ExecuteTimeLoad)

  # Browser selection: which browsers, where their binaries live, and which
  # versions to report in the output header.
  cmd.AddArgument(
    ["-b", "--browsers"], "List of browsers to use. Comma-separated",
    type="string", required=True)
  cmd.AddArgument(
    ["-bp", "--browserpaths"], "List of paths to browsers. Comma-separated",
    type="string", required=False)
  cmd.AddArgument(
    ["-bv", "--browserversions"],
    "List of versions of browsers. Comma-separated",
    type="string", required=False)
  # URL input: either a single --url or a --list file, never both.
  cmd.AddArgument(
    ["-u", "--url"], "URL to time")
  cmd.AddArgument(
    ["-l", "--list"], "List of URLs to time", type="readfile")
  cmd.AddMutualExclusion(["--url", "--list"])
  # Line-range options only make sense together with --list; --count and
  # --endline are alternative ways of bounding the range.
  cmd.AddArgument(
    ["-s", "--startline"], "First line of URL list", type="int")
  cmd.AddArgument(
    ["-e", "--endline"], "Last line of URL list (exclusive)", type="int")
  cmd.AddArgument(
    ["-c", "--count"], "Number of lines of URL file to use", type="int")
  cmd.AddDependency("--startline", "--list")
  cmd.AddRequiredGroup(["--url", "--list"])
  cmd.AddDependency("--endline", "--list")
  cmd.AddDependency("--count", "--list")
  cmd.AddMutualExclusion(["--count", "--endline"])
  cmd.AddDependency("--count", "--startline")
  cmd.AddArgument(
    ["-t", "--timeout"], "Amount of time (seconds) to wait for browser to "
    "finish loading",
    type="int", default=60)
  cmd.AddArgument(
    ["-log", "--logfile"], "File to write output", type="string", required=True)
  cmd.AddArgument(
    ["-sz", "--size"], "Browser window size", default=(800, 600), type="coords")
def ExecuteTimeLoad(command):
  """Executes the TimeLoad command.

  Times each URL in each requested browser and writes one CSV row per URL
  to the --logfile.
  """
  browsers = command["--browsers"].split(",")
  num_browsers = len(browsers)

  # Versions and paths are optional; default to one None per browser.
  if command["--browserversions"]:
    browser_versions = command["--browserversions"].split(",")
  else:
    browser_versions = [None] * num_browsers
  if command["--browserpaths"]:
    browser_paths = command["--browserpaths"].split(",")
  else:
    browser_paths = [None] * num_browsers

  if len(browser_versions) != num_browsers:
    raise ValueError(
      "--browserversions must be same length as --browser_paths")
  # NOTE(review): this second message looks copy-pasted — it mentions
  # --browserversions but the check is about --browserpaths.
  if len(browser_paths) != num_browsers:
    raise ValueError(
      "--browserversions must be same length as --browser_paths")

  # NOTE(review): 'b' below is the leaked list-comprehension variable
  # (Python 2 scoping), so the error reports the *last* browser checked,
  # not the unknown one(s); consider reporting the filtered list instead.
  if [b for b in browsers if b not in ["chrome", "ie", "firefox"]]:
    raise ValueError("unknown browsers: %r" % b)

  scraper_list = []

  for b in xrange(num_browsers):
    version = browser_versions[b]
    if not version: version = None

    scraper = scrapers.GetScraper( (browsers[b], version) )
    if not scraper:
      raise ValueError("could not find scraper for (%r, %r)" %
        (browsers[b], version))
    scraper_list.append(scraper)

  if command["--url"]:
    url_list = [command["--url"]]
  else:
    startline = command["--startline"]
    if command["--count"]:
      endline = startline+command["--count"]
    else:
      endline = command["--endline"]
    # NOTE(review): the URL list file is never closed; fine for a short-lived
    # tool, but a with-statement would be cleaner.
    url_list = [url.strip() for url in
                open(command["--list"], "r").readlines()[startline:endline]]

  log_file = open(command["--logfile"], "w")

  # Header row: URL followed by one column per browser (with version).
  log_file.write("URL")
  for b in xrange(num_browsers):
    log_file.write(",%s" % browsers[b])

    if browser_versions[b]: log_file.write(" %s" % browser_versions[b])
  log_file.write("\n")

  results = {}

  for url in url_list:
    results[url] = [None] * num_browsers

  # Time all URLs in one browser before moving to the next, so each browser
  # is launched only once.
  for b in xrange(num_browsers):
    result = scraper_list[b].Time(url_list, command["--size"],
      command["--timeout"],
      path=browser_paths[b])

    for (url, time) in result:
      results[url][b] = time

  # output the results
  for url in url_list:
    log_file.write(url)
    for b in xrange(num_browsers):
      log_file.write(",%r" % results[url][b])
| bsd-3-clause |
mviitanen/marsmod | mcp/temp-python/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Detection model for Greek text in the ISO-8859-7 (Latin-7) encoding.
Latin7GreekModel = {
  'charToOrderMap': Latin7_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,  # matches the "first 512 sequences" figure above
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-7"
}
# Detection model for Greek text in the windows-1253 encoding; shares the
# same language model as the ISO-8859-7 variant, only the byte map differs.
Win1253GreekModel = {
  'charToOrderMap': win1253_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,  # matches the "first 512 sequences" figure above
  'keepEnglishLetter': False,
  'charsetName': "windows-1253"
}
# flake8: noqa
| gpl-2.0 |
Ichag/odoo | addons/product_margin/__init__.py | 444 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import product_margin
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
obol/obol | contrib/pyminer/pyminer.py | 766 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # presumably the retry delay in seconds after RPC errors — used outside this chunk
MAX_NONCE = 1000000L  # nonces scanned per work unit (upper bound of Miner.work's loop)

settings = {}  # global settings dict; presumably populated by startup code outside this chunk
pp = pprint.PrettyPrinter(indent=4)  # shared pretty-printer for debug output
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a local bitcoind (Python 2 code:
    print statements, httplib)."""
    # Request-id counter shared by all calls; incremented per rpc() call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built once and reused on every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # One persistent connection: strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its 'result', the 'error' object,
        or None on transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # Server-side errors are returned to the caller, not raised.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Convenience wrapper for the 'getblockcount' RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    # Fix: the old literal 0xffffffffL uses the Python-2-only 'L' suffix.
    # The plain literal is numerically identical under Python 2 (ints
    # auto-promote to long) and is also valid Python 3 syntax.
    return x & 0xffffffff
def bytereverse(x):
    """Reverse the byte order of the 32-bit word *x* (endianness flip)."""
    # Move each byte to its mirrored position, then clamp to 32 bits.
    swapped = ((x << 24)
               | ((x << 8) & 0x00ff0000)
               | ((x >> 8) & 0x0000ff00)
               | (x >> 24))
    return uint32(swapped)
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf* in place (word positions kept).

    *in_buf* is a Python-2 byte string whose length is assumed to be a
    multiple of 4 (block headers / hashes are always 4-byte aligned).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # '@I' = native-endian unsigned 32-bit int.
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf* (bytes within each
    word keep their order)."""
    chunks = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """One mining worker: repeatedly fetches getwork units and scans nonces
    over double-SHA256 block headers (Python 2 code)."""

    def __init__(self, id):
        self.id = id
        # Upper bound on nonces scanned per getwork unit; re-tuned after
        # every pass so a scan takes about settings['scantime'] seconds.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork unit.

        Returns (hashes_done, nonce_bin): nonce_bin is the little-endian
        packed winning nonce, or None when no solution was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the original hex work data and
        submit it upstream via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Hex offsets 152:160 hold the nonce field of the 80-byte header.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One poll cycle: fetch work, scan it, retune max_nonce from the
        observed hash rate, and submit any solution found."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                         work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Aim the next scan at roughly settings['scantime'] seconds of work,
        # capped just below the full 32-bit nonce space.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Poll bitcoind forever using the credentials from settings."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Entry point for each worker process: build a Miner and poll forever.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file, skipping '#' comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one miner process per configured thread, staggered by 1 second.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until all workers exit; Ctrl-C drops through to the
        # shutdown message below.
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
AndrewGrossman/django | django/db/models/fields/related_lookups.py | 287 | 6153 | from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual,
)
class MultiColSource(object):
    """Stand-in left-hand side for lookups that span multiple columns
    (composite foreign objects)."""

    # A multi-column source never wraps an aggregate, so it is always
    # safe to use in a WHERE clause.
    contains_aggregate = False

    def __init__(self, alias, targets, sources, field):
        self.alias = alias
        self.targets = targets
        self.sources = sources
        self.field = field
        self.output_field = self.field

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.field)

    def relabeled_clone(self, relabels):
        """Return a copy with the table alias remapped via *relabels*."""
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
    """Normalize *value* into a tuple of raw column values suitable for
    comparing against *lhs* (the lookup's left-hand side)."""
    from django.db.models import Model
    if isinstance(value, Model):
        value_list = []
        # A case like Restaurant.objects.filter(place=restaurant_instance),
        # where place is a OneToOneField and the primary key of Restaurant.
        if getattr(lhs.output_field, 'primary_key', False):
            return (value.pk,)
        sources = lhs.output_field.get_path_info()[-1].target_fields
        for source in sources:
            # Walk foreign-key chains until we reach the field that is
            # concretely stored on the model *value* is an instance of.
            while not isinstance(value, source.model) and source.remote_field:
                source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
            value_list.append(getattr(value, source.attname))
        return tuple(value_list)
    # Plain (non-model) values are wrapped so callers can always iterate.
    if not isinstance(value, tuple):
        return (value,)
    return value
class RelatedIn(In):
    """'__in' lookup for related fields: normalizes model instances in the
    rhs list and supports multi-column (composite) left-hand sides."""

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
            # We need to run the related field's get_prep_lookup(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if hasattr(self.lhs.output_field, 'get_path_info'):
                # Run the target field's get_prep_lookup. We can safely assume there is
                # only one as we don't get to the direct value branch otherwise.
                self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
                    self.lookup_name, self.rhs)
        return super(RelatedIn, self).get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # For multicolumn lookups we need to build a multicolumn where clause.
            # This clause is either a SubqueryConstraint (for values that need to be compiled to
            # SQL) or a OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
            from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR

            root_constraint = WhereNode(connector=OR)
            if self.rhs_is_direct_value():
                # One AND-group of per-column exact matches for each rhs
                # value, OR-ed together at the root.
                values = [get_normalized_value(value, self.lhs) for value in self.rhs]
                for value in values:
                    value_constraint = WhereNode()
                    for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
                        lookup_class = target.get_lookup('exact')
                        lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
                        value_constraint.add(lookup, AND)
                    root_constraint.add(value_constraint, OR)
            else:
                # Non-direct rhs (e.g. a queryset) becomes a subquery.
                root_constraint.add(
                    SubqueryConstraint(
                        self.lhs.alias, [target.column for target in self.lhs.targets],
                        [source.name for source in self.lhs.sources], self.rhs),
                    AND)
            return root_constraint.as_sql(compiler, connection)
        else:
            return super(RelatedIn, self).as_sql(compiler, connection)
class RelatedLookupMixin(object):
    """Mixin shared by the single-value related lookups below: normalizes
    model-instance rhs values and expands multi-column lhs into AND-ed
    per-column lookups."""

    def get_prep_lookup(self):
        if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
            # If we get here, we are dealing with single-column relations.
            self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
            # We need to run the related field's get_prep_lookup(). Consider case
            # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
            # doesn't have validation for non-integers, so we must run validation
            # using the target field.
            if hasattr(self.lhs.output_field, 'get_path_info'):
                # Get the target field. We can safely assume there is only one
                # as we don't get to the direct value branch otherwise.
                self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
                    self.lookup_name, self.rhs)
        return super(RelatedLookupMixin, self).get_prep_lookup()

    def as_sql(self, compiler, connection):
        if isinstance(self.lhs, MultiColSource):
            # Multi-column comparisons only support direct values here.
            assert self.rhs_is_direct_value()
            self.rhs = get_normalized_value(self.rhs, self.lhs)
            from django.db.models.sql.where import WhereNode, AND
            root_constraint = WhereNode()
            for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
                lookup_class = target.get_lookup(self.lookup_name)
                root_constraint.add(
                    lookup_class(target.get_col(self.lhs.alias, source), val), AND)
            return root_constraint.as_sql(compiler, connection)
        return super(RelatedLookupMixin, self).as_sql(compiler, connection)
# Concrete related-field lookups: each pairs RelatedLookupMixin's value
# normalization with the SQL of the corresponding base lookup.
class RelatedExact(RelatedLookupMixin, Exact):
    pass


class RelatedLessThan(RelatedLookupMixin, LessThan):
    pass


class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
    pass


class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
    pass


class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
    pass
| bsd-3-clause |
epandurski/django | tests/model_meta/models.py | 99 | 4711 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Relation(models.Model):
    """Bare target model that every relational field below points at."""
    pass


class AbstractPerson(models.Model):
    """Abstract base declaring one field of every relational kind, so tests
    can inspect how each is reported by Model._meta."""
    # DATA fields
    data_abstract = models.CharField(max_length=10)
    fk_abstract = models.ForeignKey(Relation, related_name='fk_abstract_rel')

    # M2M fields
    m2m_abstract = models.ManyToManyField(Relation, related_name='m2m_abstract_rel')
    friends_abstract = models.ManyToManyField('self', related_name='friends_abstract', symmetrical=True)
    following_abstract = models.ManyToManyField('self', related_name='followers_abstract', symmetrical=False)

    # VIRTUAL fields
    data_not_concrete_abstract = models.ForeignObject(
        Relation,
        from_fields=['abstract_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_abstract_rel',
    )

    # GFK fields
    content_type_abstract = models.ForeignKey(ContentType, related_name='+')
    object_id_abstract = models.PositiveIntegerField()
    content_object_abstract = GenericForeignKey('content_type_abstract', 'object_id_abstract')

    # GR fields
    generic_relation_abstract = GenericRelation(Relation)

    class Meta:
        abstract = True
class BasePerson(AbstractPerson):
    """Concrete base adding a second tier of fields of every kind (used to
    test inheritance handling in Model._meta)."""
    # DATA fields
    data_base = models.CharField(max_length=10)
    fk_base = models.ForeignKey(Relation, related_name='fk_base_rel')

    # M2M fields
    m2m_base = models.ManyToManyField(Relation, related_name='m2m_base_rel')
    friends_base = models.ManyToManyField('self', related_name='friends_base', symmetrical=True)
    following_base = models.ManyToManyField('self', related_name='followers_base', symmetrical=False)

    # VIRTUAL fields
    data_not_concrete_base = models.ForeignObject(
        Relation,
        from_fields=['base_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_base_rel',
    )

    # GFK fields
    content_type_base = models.ForeignKey(ContentType, related_name='+')
    object_id_base = models.PositiveIntegerField()
    content_object_base = GenericForeignKey('content_type_base', 'object_id_base')

    # GR fields
    generic_relation_base = GenericRelation(Relation)


class Person(BasePerson):
    """Leaf concrete model adding a third tier of fields of every kind."""
    # DATA fields
    data_inherited = models.CharField(max_length=10)
    fk_inherited = models.ForeignKey(Relation, related_name='fk_concrete_rel')

    # M2M Fields
    m2m_inherited = models.ManyToManyField(Relation, related_name='m2m_concrete_rel')
    friends_inherited = models.ManyToManyField('self', related_name='friends_concrete', symmetrical=True)
    following_inherited = models.ManyToManyField('self', related_name='followers_concrete', symmetrical=False)

    # VIRTUAL fields
    data_not_concrete_inherited = models.ForeignObject(
        Relation,
        from_fields=['model_non_concrete_id'],
        to_fields=['id'],
        related_name='fo_concrete_rel',
    )

    # GFK fields
    content_type_concrete = models.ForeignKey(ContentType, related_name='+')
    object_id_concrete = models.PositiveIntegerField()
    content_object_concrete = GenericForeignKey('content_type_concrete', 'object_id_concrete')

    # GR fields
    generic_relation_concrete = GenericRelation(Relation)
class ProxyPerson(Person):
    """Proxy over Person, for testing reverse relations through proxies."""
    class Meta:
        proxy = True


class Relating(models.Model):
    """Model pointing back at the person hierarchy; '+' related_names make
    the reverse accessor hidden."""
    # ForeignKey to BasePerson
    baseperson = models.ForeignKey(BasePerson, related_name='relating_baseperson')
    baseperson_hidden = models.ForeignKey(BasePerson, related_name='+')

    # ForeignKey to Person
    person = models.ForeignKey(Person, related_name='relating_person')
    person_hidden = models.ForeignKey(Person, related_name='+')

    # ForeignKey to ProxyPerson
    proxyperson = models.ForeignKey(ProxyPerson, related_name='relating_proxyperson')
    proxyperson_hidden = models.ForeignKey(ProxyPerson, related_name='+')

    # ManyToManyField to BasePerson
    basepeople = models.ManyToManyField(BasePerson, related_name='relating_basepeople')
    basepeople_hidden = models.ManyToManyField(BasePerson, related_name='+')

    # ManyToManyField to Person
    people = models.ManyToManyField(Person, related_name='relating_people')
    people_hidden = models.ManyToManyField(Person, related_name='+')


# ParentListTests models: diamond inheritance over a common ancestor.
class CommonAncestor(models.Model):
    pass


class FirstParent(CommonAncestor):
    first_ancestor = models.OneToOneField(CommonAncestor, primary_key=True, parent_link=True)


class SecondParent(CommonAncestor):
    second_ancestor = models.OneToOneField(CommonAncestor, primary_key=True, parent_link=True)


class Child(FirstParent, SecondParent):
    pass
| bsd-3-clause |
Fokko/incubator-airflow | tests/gcp/operators/test_bigtable_system.py | 1 | 1567 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests.gcp.operators.test_bigtable_system_helper import GCPBigtableTestHelper
from tests.gcp.utils.gcp_authenticator import GCP_BIGTABLE_KEY
from tests.test_utils.gcp_system_helpers import GCP_DAG_FOLDER, provide_gcp_context, skip_gcp_system
from tests.test_utils.system_tests_class import SystemTest
@skip_gcp_system(GCP_BIGTABLE_KEY, require_local_executor=True)
class BigTableExampleDagsSystemTest(SystemTest):
    """System test that runs the example Bigtable operators DAG against a
    real GCP project (skipped unless Bigtable credentials are configured)."""
    helper = GCPBigtableTestHelper()

    @provide_gcp_context(GCP_BIGTABLE_KEY)
    def test_run_example_dag_gcs_bigtable(self):
        self.run_dag('example_gcp_bigtable_operators', GCP_DAG_FOLDER)

    @provide_gcp_context(GCP_BIGTABLE_KEY)
    def tearDown(self):
        # Always tear down the Bigtable instance the example DAG created,
        # even if the test itself failed.
        self.helper.delete_instance()
        super().tearDown()
| apache-2.0 |
frouty/odoo_oph | addons/web/tests/test_serving_base.py | 138 | 1031 | # -*- coding: utf-8 -*-
import random
import unittest2
from ..controllers.main import module_topological_sort as sort
def sample(population):
    """Return a random subset of *population* holding at most 5 elements."""
    # Pick the subset size first (0..min(len, 5)), then draw that many
    # distinct elements.
    size = random.randint(0, min(len(population), 5))
    return random.sample(population, size)
class TestModulesLoading(unittest2.TestCase):
    """Property test for module_topological_sort on random acyclic graphs."""
    def setUp(self):
        # NOTE(review): this relies on map() returning a sliceable list,
        # i.e. Python 2 semantics (py3's map is an iterator) — matches the
        # unittest2 usage; confirm target runtime before porting.
        self.mods = map(str, range(1000))
    def test_topological_sort(self):
        random.shuffle(self.mods)
        # Each module depends only on modules earlier in the (pre-shuffle)
        # ordering, which guarantees the dependency graph is acyclic.
        modules = [
            (k, sample(self.mods[:i]))
            for i, k in enumerate(self.mods)]
        random.shuffle(modules)
        ms = dict(modules)
        seen = set()
        sorted_modules = sort(ms)
        for module in sorted_modules:
            deps = ms[module]
            # Topological property: every dependency was emitted earlier.
            self.assertGreaterEqual(
                seen, set(deps),
                'Module %s (index %d), ' \
                'missing dependencies %s from loaded modules %s' % (
                    module, sorted_modules.index(module), deps, seen
                ))
            seen.add(module)
| agpl-3.0 |
ivanlyon/exercises | test/test_k_pebblesolitaire.py | 1 | 1266 | import io
import unittest
from unittest.mock import patch
from kattis import k_pebblesolitaire
###############################################################################
class SampleInput(unittest.TestCase):
    '''Problem statement sample inputs and outputs'''
    def test_sample_input(self):
        '''Run and assert problem statement sample input and output.'''
        # Five boards from the problem statement: 'o' = pebble, '-' = empty.
        inputs = []
        inputs.append('5')
        inputs.append('---oo-------')
        inputs.append('-o--o-oo----')
        inputs.append('-o----ooo---')
        inputs.append('oooooooooooo')
        inputs.append('oooooooooo-o')
        inputs = '\n'.join(inputs) + '\n'
        # Expected minimum number of pebbles remaining for each board.
        outputs = []
        outputs.append('1')
        outputs.append('2')
        outputs.append('3')
        outputs.append('12')
        outputs.append('1')
        outputs = '\n'.join(outputs) + '\n'
        # Feed stdin, capture stdout, and require all input to be consumed.
        with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
             patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_pebblesolitaire.main()
            self.assertEqual(stdout.getvalue(), outputs)
            self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
| mit |
jimbobhickville/taskflow | taskflow/examples/alphabet_soup.py | 2 | 3255 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fractions
import functools
import logging
import os
import string
import sys
import time
logging.basicConfig(level=logging.ERROR)
self_dir = os.path.abspath(os.path.dirname(__file__))
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
sys.path.insert(0, self_dir)
from taskflow import engines
from taskflow import exceptions
from taskflow.patterns import linear_flow
from taskflow import task
# In this example we show how a simple linear set of tasks can be executed
# using local processes (and not threads or remote workers) with minimal (if
# any) modification to those tasks to make them safe to run in this mode.
#
# This is useful since it allows further scaling up your workflows when thread
# execution starts to become a bottleneck (which it can start to be due to the
# GIL in python). It also offers a intermediary scalable runner that can be
# used when the scale and/or setup of remote workers is not desirable.
def progress_printer(task, event_type, details):
    """Print a task's progress update as a percentage.

    Registered per task; runs in the local (parent) process, not in the
    child worker processes.
    """
    fraction = details.pop('progress')
    percent = int(fraction * 100.0)
    print("Task '%s' reached %d%% completion" % (task.name, percent))
class AlphabetTask(task.Task):
    """Task that sleeps through four intermediate progress fractions
    (1/5 .. 4/5), emitting a progress update before each sleep."""
    # Second delay between each progress part.
    _DELAY = 0.1

    # This task will run in X main stages (each with a different progress
    # report that will be delivered back to the running process...). The
    # initial 0% and 100% are triggered automatically by the engine when
    # a task is started and finished (so that's why those are not emitted
    # here).
    _PROGRESS_PARTS = [fractions.Fraction("%s/5" % x) for x in range(1, 5)]

    def execute(self):
        for p in self._PROGRESS_PARTS:
            self.update_progress(p)
            time.sleep(self._DELAY)
# Build a linear flow with one task per lowercase letter; each task's
# progress events are routed back to this (parent) process.
print("Constructing...")
soup = linear_flow.Flow("alphabet-soup")
for letter in string.ascii_lowercase:
    abc = AlphabetTask(letter)
    abc.notifier.register(task.EVENT_UPDATE_PROGRESS,
                          functools.partial(progress_printer, abc))
    soup.add(abc)
try:
    print("Loading...")
    # The 'processes' executor runs each task in a child process while
    # progress callbacks still fire locally.
    e = engines.load(soup, engine='parallel', executor='processes')
    print("Compiling...")
    e.compile()
    print("Preparing...")
    e.prepare()
    print("Running...")
    e.run()
    print("Done: %s" % e.statistics)
except exceptions.NotImplementedError as e:
    # Raised when this platform/engine combination lacks process support.
    print(e)
| apache-2.0 |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/distutils/__init__.py | 24 | 3746 | import os
import sys
import warnings
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
# Locate the real (system) distutils package next to a known stdlib module
# (opcode is never shadowed by virtualenv, so its location is trustworthy).
dirname = os.path.dirname
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
    # Bug fix: the warning contained an unfilled '%s' placeholder — actually
    # interpolate the offending path into the message.
    warnings.warn(
        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?"
        % distutils_path)
else:
    # Graft the system distutils' submodules onto this package and execute
    # its __init__ in our namespace so we act as a drop-in replacement.
    __path__.insert(0, distutils_path)
    exec(open(os.path.join(distutils_path, '__init__.py')).read())
from distutils import dist, sysconfig
try:
basestring
except NameError:
basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
    from distutils.command.build_ext import build_ext as old_build_ext
    class build_ext(old_build_ext):
        def finalize_options (self):
            # Normalize library_dirs to a list, then make sure the base
            # (real) interpreter's Libs directory is searched first.
            if self.library_dirs is None:
                self.library_dirs = []
            elif isinstance(self.library_dirs, basestring):
                self.library_dirs = self.library_dirs.split(os.pathsep)
            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
            old_build_ext.finalize_options(self)
    # Install the patched command class in place of the original.
    from distutils.command import build_ext as build_ext_module
    build_ext_module.build_ext = build_ext
## distutils.dist patches:
old_find_config_files = dist.Distribution.find_config_files
def find_config_files(self):
    """Replacement for Distribution.find_config_files that makes the
    per-user config inside the virtualenv's sys.prefix win."""
    found = old_find_config_files(self)
    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
    #if os.path.exists(system_distutils):
    #    found.insert(0, system_distutils)
    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"
    user_filename = os.path.join(sys.prefix, user_filename)
    if os.path.isfile(user_filename):
        # Drop any previously-found pydistutils.cfg and append the
        # virtualenv's copy last, so its settings override the others.
        for item in list(found):
            if item.endswith('pydistutils.cfg'):
                found.remove(item)
        found.append(user_filename)
    return found
dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
    # Headers live with the base interpreter, so default the prefix there.
    if prefix is None:
        prefix = sys.real_prefix
    return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc

old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    # Only the standard library is redirected to the base interpreter;
    # site-packages remain inside the virtualenv.
    if standard_lib and prefix is None:
        prefix = sys.real_prefix
    return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib

old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
    real_vars = old_get_config_vars(*args)
    if sys.platform == 'win32':
        # Windows extension builds link against <real_prefix>\libs; inject
        # LIBDIR so compilers invoked from the virtualenv can find it.
        lib_dir = os.path.join(sys.real_prefix, "libs")
        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
            real_vars['LIBDIR'] = lib_dir # asked for all
        elif isinstance(real_vars, list) and 'LIBDIR' in args:
            real_vars = real_vars + [lib_dir] # asked for list
    return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
| mit |
MatthieuBizien/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
    # Checks whether accuracy increases as `n_candidates` increases.
    # NOTE(review): the first value (.1) is a sub-1 float candidate count —
    # presumably exercising the estimator's handling of tiny values; confirm
    # it is intentional rather than a typo for 1.
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, n_candidates in enumerate(n_candidates_values):
        lshf = LSHForest(n_candidates=n_candidates)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            # Accuracy = fraction of the true (cosine) nearest neighbors
            # recovered by the approximate query, averaged over n_iter draws.
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")


def test_neighbors_accuracy_with_n_estimators():
    # Checks whether accuracy increases as `n_estimators` increases.
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, t in enumerate(n_estimators):
        lshf = LSHForest(n_candidates=500, n_estimators=t)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            # Same recovery-ratio measure as the n_candidates test above.
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
    # Checks whether desired number of neighbors are returned.
    # It is guaranteed to return the requested number of neighbors
    # if `min_hash_match` is set to 0. Returned distances should be
    # in ascending order.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest(min_hash_match=0)
    # Test unfitted estimator
    assert_raises(ValueError, lshf.kneighbors, X[0])

    ignore_warnings(lshf.fit)(X)

    for i in range(n_iter):
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)].reshape(1, -1)
        neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
                                    return_distance=False)
        # Desired number of neighbors should be returned.
        assert_equal(neighbors.shape[1], n_neighbors)

    # Multiple points: one row of results per query point.
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.kneighbors(queries,
                                           n_neighbors=1,
                                           return_distance=True)
    assert_equal(neighbors.shape[0], n_queries)
    assert_equal(distances.shape[0], n_queries)
    # Test only neighbors
    neighbors = lshf.kneighbors(queries, n_neighbors=1,
                                return_distance=False)
    assert_equal(neighbors.shape[0], n_queries)
    # Test random point(not in the data set)
    query = rng.randn(n_features).reshape(1, -1)
    lshf.kneighbors(query, n_neighbors=1,
                    return_distance=False)
    # Test n_neighbors at initialization (LSHForest defaults to 5).
    neighbors = lshf.kneighbors(query, return_distance=False)
    assert_equal(neighbors.shape[1], 5)
    # Test `neighbors` has an integer dtype
    assert_true(neighbors.dtype.kind == 'i',
                msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether Returned distances are less than `radius`
    # At least one point should be returned when the `radius` is set
    # to mean distance from the considering point to other points in
    # the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with the `sklearn.neighbors.NearestNeighbors`.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest()
    # Test unfitted estimator
    assert_raises(ValueError, lshf.radius_neighbors, X[0])

    ignore_warnings(lshf.fit)(X)

    for i in range(n_iter):
        # Select a random point in the dataset as the query
        query = X[rng.randint(0, n_samples)].reshape(1, -1)

        # At least one neighbor should be returned when the radius is the
        # mean distance from the query to the points of the dataset.
        mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
        neighbors = lshf.radius_neighbors(query, radius=mean_dist,
                                          return_distance=False)

        assert_equal(neighbors.shape, (1,))
        assert_equal(neighbors.dtype, object)
        assert_greater(neighbors[0].shape[0], 0)
        # All distances to points in the results of the radius query should
        # be less than mean_dist
        distances, neighbors = lshf.radius_neighbors(query,
                                                     radius=mean_dist,
                                                     return_distance=True)
        assert_array_less(distances[0], mean_dist)

    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.radius_neighbors(queries,
                                                 return_distance=True)

    # dists and inds should not be 1D arrays or arrays of variable lengths
    # hence the use of the object dtype.
    assert_equal(distances.shape, (n_queries,))
    assert_equal(distances.dtype, object)
    assert_equal(neighbors.shape, (n_queries,))
    assert_equal(neighbors.dtype, object)

    # Compare with exact neighbor search
    query = X[rng.randint(0, n_samples)].reshape(1, -1)
    mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)

    distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
    distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)

    # Radius-based queries do not sort the result points and the order
    # depends on the method, the random_state and the dataset order. Therefore
    # we need to sort the results ourselves before performing any comparison.
    sorted_dists_exact = np.sort(distances_exact[0])
    sorted_dists_approx = np.sort(distances_approx[0])

    # Distances to exact neighbors are less than or equal to approximate
    # counterparts as the approximate radius query might have missed some
    # closer neighbors.
    assert_true(np.all(np.less_equal(sorted_dists_exact,
                                     sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
    """Exact vs. approximate radius queries around the boundary radius.

    On a toy dataset the cosine distances from a fixed query to every point
    are known analytically, so inclusion/exclusion of the point lying exactly
    on the query radius can be asserted for both models.
    """
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
    n_points = len(X)
    # Build an exact nearest neighbors model as reference model to ensure
    # consistency between exact and approximate methods
    nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # Build a LSHForest model with hyperparameter values that always guarantee
    # exact results on this toy dataset.
    lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
    # define a query aligned with the first axis
    query = [[1., 0.]]
    # Compute the exact cosine distances of the query to the four points of
    # the dataset
    dists = pairwise_distances(query, X, metric='cosine').ravel()
    # The first point is almost aligned with the query (very small angle),
    # the cosine distance should therefore be almost null:
    assert_almost_equal(dists[0], 0, decimal=5)
    # The second point form an angle of 45 degrees to the query vector
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal from the query vector hence at a distance
    # exactly one:
    assert_almost_equal(dists[2], 1)
    # The last point is almost colinear but with opposite sign to the query
    # therefore it has a cosine 'distance' very close to the maximum possible
    # value of 2.
    assert_almost_equal(dists[3], 2, decimal=5)
    # If we query with a radius of one, all the samples except the last sample
    # should be included in the results. This means that the third sample
    # is lying on the boundary of the radius query:
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
    assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
    # point of the dataset that lay on the boundary of the previous query
    # is now rejected:
    eps = np.finfo(np.float64).eps
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
    assert_array_equal(np.sort(exact_idx[0]), [0, 1])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
    """kneighbors must return neighbors ordered from closest to farthest."""
    num_points, num_features, num_trials = 12, 2, 10
    rng = np.random.RandomState(42)
    data = rng.rand(num_points, num_features)
    forest = LSHForest()
    ignore_warnings(forest.fit)(data)
    for _ in range(num_trials):
        k = rng.randint(0, num_points)
        probe = data[rng.randint(0, num_points)].reshape(1, -1)
        dists, _ = forest.kneighbors(probe, n_neighbors=k,
                                     return_distance=True)
        # Distances along the returned row must be non-decreasing.
        assert_true(np.all(np.diff(dists[0]) >= 0))
    # Note: the radius_neighbors method does not guarantee the order of
    # the results.
def test_fit():
    """`fit` must populate every fitted attribute with consistent sizes."""
    num_points, num_features, num_trees = 12, 2, 5
    rng = np.random.RandomState(42)
    data = rng.rand(num_points, num_features)
    forest = LSHForest(n_estimators=num_trees)
    ignore_warnings(forest.fit)(data)
    # The training data is stored verbatim.
    assert_array_equal(data, forest._fit_X)
    # One hash function g(p) per tree, each with a hash length of 32.
    assert_equal(num_trees, len(forest.hash_functions_))
    assert_equal(32, forest.hash_functions_[0].components_.shape[0])
    # One tree per estimator, each holding an entry per training point.
    assert_equal(num_trees, len(forest.trees_))
    assert_equal(num_points, len(forest.trees_[0]))
    # Same bookkeeping for the original indices of the sorted hashes.
    assert_equal(num_trees, len(forest.original_indices_))
    assert_equal(num_points, len(forest.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting array is consistent with fitted data.
    # `partial_fit` method should set all attribute values correctly.
    n_samples = 12
    n_samples_partial_fit = 3
    n_features = 2
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
    lshf = LSHForest()
    # Test unfitted estimator: partial_fit on a fresh model behaves like fit.
    ignore_warnings(lshf.partial_fit)(X)
    assert_array_equal(X, lshf._fit_X)
    ignore_warnings(lshf.fit)(X)
    # Inserting an array with the wrong feature dimension must be rejected.
    assert_raises(ValueError, lshf.partial_fit,
                  np.random.randn(n_samples_partial_fit, n_features - 1))
    ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # After insertion _fit_X holds both the original and the new samples.
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # original_indices_[0] tracks one entry per stored sample.
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # trees_[1] likewise has one entry per stored sample.
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
    """Hash functions must be randomly seeded, hence mutually different.

    If every projection vector were built from the same seed, the variance
    and the mean of each individual hash function would coincide with those
    of the flattened collection; assert that this is not the case.
    """
    num_points, num_features, num_trees = 12, 2, 5
    rng = np.random.RandomState(42)
    data = rng.rand(num_points, num_features)
    forest = LSHForest(n_estimators=num_trees,
                       random_state=rng.randint(0, np.iinfo(np.int32).max))
    ignore_warnings(forest.fit)(data)
    projections = [forest.hash_functions_[i].components_
                   for i in range(num_trees)]
    for components in projections:
        assert_not_equal(np.var(projections), np.var(components))
    for components in projections:
        assert_not_equal(np.mean(projections), np.mean(components))
def test_candidates():
    """Candidate shortfalls must warn and be padded up to n_neighbors.

    Covers both the zero-candidate case (min_hash_match=32) and the
    too-few-candidates case (min_hash_match=31).
    """
    X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
                        [6, 10, 2]], dtype=np.float32)
    X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)

    def expected_warning(n_neighbors, min_hash_match):
        # Exact warning text emitted by LSHForest when it pads candidates.
        return ("Number of candidates is not sufficient to retrieve"
                " %i neighbors with"
                " min_hash_match = %i. Candidates are filled up"
                " uniformly from unselected"
                " indices." % (n_neighbors, min_hash_match))

    for min_hash, n_requested in ((32, 3), (31, 5)):
        forest = LSHForest(min_hash_match=min_hash)
        ignore_warnings(forest.fit)(X_train)
        assert_warns_message(UserWarning,
                             expected_warning(n_requested, min_hash),
                             forest.kneighbors, X_test,
                             n_neighbors=n_requested)
        dists, _ = forest.kneighbors(X_test, n_neighbors=n_requested)
        # Padding still yields the requested number of neighbors.
        assert_equal(dists.shape[1], n_requested)
def test_graphs():
    """Smoke-test kneighbors_graph / radius_neighbors_graph output shapes."""
    num_features = 3
    rng = np.random.RandomState(42)
    for num_points in (5, 10, 20):
        data = rng.rand(num_points, num_features)
        forest = LSHForest(min_hash_match=0)
        ignore_warnings(forest.fit)(data)
        knn_graph = forest.kneighbors_graph(data)
        radius_graph = forest.radius_neighbors_graph(data)
        # Both adjacency matrices are square in the number of samples.
        assert_equal(knn_graph.shape[0], num_points)
        assert_equal(knn_graph.shape[1], num_points)
        assert_equal(radius_graph.shape[0], num_points)
        assert_equal(radius_graph.shape[1], num_points)
def test_sparse_input():
    """Sparse and dense inputs must yield identical neighbor results."""
    # note: Fixed random state in sp.rand is not supported in older scipy.
    # The test should succeed regardless.
    train = sp.rand(50, 100)
    probes = sp.rand(10, 100)
    forest_sparse = LSHForest(radius=1, random_state=0).fit(train)
    forest_dense = LSHForest(radius=1, random_state=0).fit(train.A)
    d_sparse, i_sparse = forest_sparse.kneighbors(probes, return_distance=True)
    d_dense, i_dense = forest_dense.kneighbors(probes.A, return_distance=True)
    assert_almost_equal(d_sparse, d_dense)
    assert_almost_equal(i_sparse, i_dense)
    d_sparse, i_sparse = forest_sparse.radius_neighbors(probes,
                                                        return_distance=True)
    d_dense, i_dense = forest_dense.radius_neighbors(probes.A,
                                                     return_distance=True)
    assert_equal(d_sparse.shape, d_dense.shape)
    # Radius results are object arrays of per-query vectors; compare rowwise.
    for sparse_row, dense_row in zip(d_sparse, d_dense):
        assert_almost_equal(sparse_row, dense_row)
    for sparse_row, dense_row in zip(i_sparse, i_dense):
        assert_almost_equal(sparse_row, dense_row)
| bsd-3-clause |
wkschwartz/django | tests/gis_tests/geo3d/tests.py | 22 | 12727 | import os
import re
from django.contrib.gis.db.models import Extent3D, Union
from django.contrib.gis.db.models.functions import (
AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate,
)
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.test import TestCase, skipUnlessDBFeature
from ..utils import FuncTestMixin
from .models import (
City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D,
MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D,
)
# Paths to the shapefile / VRT fixtures used by the LayerMapping tests.
data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')

# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)

# Reference mapping of city name to its (x, y, z) tuple; index 2 is the
# altitude (Z value) used by the assertions below.
city_dict = {name: coords for name, coords in city_data}

# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
# Each entry is (name, 3D LINESTRING WKT, expected tuple of Z values).
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
     '-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
     '-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
     '-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
     '-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
     '-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
     '-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
     '-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
     '-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
     '-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
     '-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
      15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
      15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
      15.435),
     ),
)

# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
    'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
    '942051.75 4208366.38,941527.97 4225693.20))',
    (21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DLoadingHelper:
    """Mixin that populates the 3D test models from the module-level fixtures."""

    def _load_interstate_data(self):
        """Create 2D/3D rows for both geographic and projected interstates."""
        for road_name, wkt, _expected_z in interstate_data:
            geom_3d = GEOSGeometry(wkt, srid=4269)
            # Drop the Z coordinate from every vertex for the 2D variants.
            flattened = [vertex[:2] for vertex in geom_3d.coords]
            geom_2d = LineString(flattened, srid=4269)
            Interstate3D.objects.create(name=road_name, line=geom_3d)
            InterstateProj3D.objects.create(name=road_name, line=geom_3d)
            Interstate2D.objects.create(name=road_name, line=geom_2d)
            InterstateProj2D.objects.create(name=road_name, line=geom_2d)

    def _load_city_data(self):
        """Create a City3D row (geometry + geography points) per fixture city."""
        for city_name, coords in city_data:
            City3D.objects.create(
                name=city_name,
                point=Point(*coords, srid=4326),
                pointg=Point(*coords, srid=4326),
            )

    def _load_polygon_data(self):
        """Create the 2D bounding-box polygon and its 3D counterpart."""
        wkt, z_values = bbox_data
        flat_poly = GEOSGeometry(wkt, srid=32140)
        # Zip the 2D exterior ring with the fixture Z values to build 3D.
        ring_3d = tuple((x, y, z)
                        for (x, y), z in zip(flat_poly[0].coords, z_values))
        Polygon2D.objects.create(name='2D BBox', poly=flat_poly)
        Polygon3D.objects.create(name='3D BBox',
                                 poly=Polygon(ring_3d, srid=32140))
@skipUnlessDBFeature("supports_3d_storage")
class Geo3DTest(Geo3DLoadingHelper, TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:
    https://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
    """

    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            # Both the geographic and projected variants keep their Z values.
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))
        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            # Testing both geometry and geography fields
            self.assertTrue(city.point.hasz)
            self.assertTrue(city.pointg.hasz)
            self.assertEqual(city.point.z, pnt_data[2])
            self.assertEqual(city.pointg.z, pnt_data[2])

    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)

    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        # Import here as GDAL is required for those imports
        from django.contrib.gis.utils import LayerMapError, LayerMapping
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}
        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())
        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        with self.assertRaises(LayerMapError):
            LayerMapping(Point3D, city_file, point_mapping, transform=False)
        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())
        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())

    @skipUnlessDBFeature("supports_3d_functions")
    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = (
            'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
            '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
            '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        )
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        # Ordering of points in the resulting geometry may vary between
        # implementations, so compare as sets of per-point EWKT strings.
        self.assertEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})

    @skipUnlessDBFeature("supports_3d_functions")
    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']

        def check_extent3d(extent3d, tol=6):
            # Compare each of (xmin, ymin, zmin, xmax, ymax, zmax) to `tol` places.
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)

        check_extent3d(extent)
        # An empty queryset yields a NULL extent.
        self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])
@skipUnlessDBFeature("supports_3d_functions")
class Geo3DFunctionsTests(FuncTestMixin, Geo3DLoadingHelper, TestCase):
    """Tests for GeoDjango database functions on 3D geometries."""

    def test_kml(self):
        """
        Test KML() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    def test_geojson(self):
        """
        Test GeoJSON() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    def test_perimeter(self):
        """
        Testing Perimeter() function on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox')
        self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
        poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox')
        self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)

    def test_length(self):
        """
        Testing Length() function on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        # FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)

    def test_scale(self):
        """
        Testing Scale() function on Z values.
        """
        self._load_city_data()
        # Z scale factors to apply; X/Y are left untouched (factor 1.0).
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    def test_translate(self):
        """
        Testing Translate() function on Z values.
        """
        self._load_city_data()
        # Z offsets to apply; X/Y translation is zero.
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| bsd-3-clause |
justacec/bokeh | examples/models/gauges.py | 6 | 4426 | from __future__ import print_function
from math import pi, sin, cos
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
from bokeh.models.glyphs import Circle, Arc, Ray, Text
from bokeh.models import ColumnDataSource, Range1d, Plot
# Data ranges padded slightly beyond the unit circle the gauge is drawn on.
xdr = Range1d(start=-1.25, end=1.25)
ydr = Range1d(start=-1.25, end=1.25)

plot = Plot(title="Speedometer", x_range=xdr, y_range=ydr, plot_width=600, plot_height=600)

# The dial sweeps clockwise from 225 degrees (pi + pi/4) to -45 degrees.
start_angle = pi + pi/4
end_angle = -pi/4

max_kmh = 250
max_mph = max_kmh*0.621371  # the same dial limit expressed in miles per hour
major_step, minor_step = 25, 5  # tick spacing (major/minor) in speed units

# Gauge face: outer dial, center hub, and the two unit labels.
plot.add_glyph(Circle(x=0, y=0, radius=1.00, fill_color="white", line_color="black"))
plot.add_glyph(Circle(x=0, y=0, radius=0.05, fill_color="gray", line_color="black"))

plot.add_glyph(Text(x=0, y=+0.15, text=["km/h"], text_color="red", text_align="center", text_baseline="bottom", text_font_style="bold"))
plot.add_glyph(Text(x=0, y=-0.15, text=["mph"], text_color="blue", text_align="center", text_baseline="top", text_font_style="bold"))
def data(value):
    """Shorthand to override default units with "data", e.g. for `Ray.length`."""
    return {"value": value, "units": "data"}
def speed_to_angle(speed, units):
    """Map a speed (clamped to [0, max]) onto the gauge's angular range."""
    limit = max_kmh if units == "kmh" else max_mph
    clamped = min(max(speed, 0), limit)
    sweep = start_angle - end_angle
    return start_angle - sweep * float(clamped) / limit
def add_needle(speed, units):
    """Draw the needle: a long ray at the speed angle plus a short tail."""
    theta = speed_to_angle(speed, units)
    # Main pointer first, then the counterweight on the opposite side.
    for ray_length, offset in ((0.75, 0), (0.10, pi)):
        plot.add_glyph(Ray(x=0, y=0, length=data(ray_length),
                           angle=theta - offset,
                           line_color="black", line_width=3))
def polar_to_cartesian(r, alpha):
    """Convert polar coordinates (r, alpha) to a cartesian (x, y) pair."""
    x_coord = r * cos(alpha)
    y_coord = r * sin(alpha)
    return x_coord, y_coord
def add_gauge(radius, max_value, length, direction, color, major_step, minor_step):
    """Draw one circular scale (arc, ticks, labels) onto the global plot.

    Args:
        radius: radius of the scale's arc.
        max_value: speed at the end of the sweep.
        length: major tick length (minor ticks are half this).
        direction: +1 draws ticks/labels outward, -1 inward.
        color: line color for the arc and ticks.
        major_step, minor_step: label/tick spacing in speed units.
    """
    major_angles, minor_angles = [], []
    major_labels, minor_labels = [], []

    total_angle = start_angle - end_angle

    # Angular distance between consecutive major/minor ticks.
    major_angle_step = float(major_step)/max_value*total_angle
    minor_angle_step = float(minor_step)/max_value*total_angle

    major_angle = 0
    while major_angle <= total_angle:
        major_angles.append(start_angle - major_angle)
        major_angle += major_angle_step

    minor_angle = 0
    while minor_angle <= total_angle:
        minor_angles.append(start_angle - minor_angle)
        minor_angle += minor_angle_step

    major_labels = [ major_step*i for i, _ in enumerate(major_angles) ]
    minor_labels = [ minor_step*i for i, _ in enumerate(minor_angles) ]

    # Drop minor ticks that coincide with major ticks (every n-th one).
    # NOTE(review): `/` is float division on Python 3, so n may be a float;
    # `i % n` still works for exact multiples, but confirm steps divide evenly.
    n = major_step/minor_step
    minor_angles = [ x for i, x in enumerate(minor_angles) if i % n != 0 ]
    minor_labels = [ x for i, x in enumerate(minor_labels) if i % n != 0 ]

    glyph = Arc(x=0, y=0, radius=radius, start_angle=start_angle, end_angle=end_angle, direction="clock", line_color=color, line_width=2)
    plot.add_glyph(glyph)

    # Ticks drawn inward need their rays rotated by -pi.
    rotation = 0 if direction == 1 else -pi

    # Major ticks.
    x, y = zip(*[ polar_to_cartesian(radius, angle) for angle in major_angles ])
    angles = [ angle + rotation for angle in major_angles ]
    source = ColumnDataSource(dict(x=x, y=y, angle=angles))

    glyph = Ray(x="x", y="y", length=data(length), angle="angle", line_color=color, line_width=2)
    plot.add_glyph(source, glyph)

    # Minor ticks (half length).
    x, y = zip(*[ polar_to_cartesian(radius, angle) for angle in minor_angles ])
    angles = [ angle + rotation for angle in minor_angles ]
    source = ColumnDataSource(dict(x=x, y=y, angle=angles))

    glyph = Ray(x="x", y="y", length=data(length/2), angle="angle", line_color=color, line_width=1)
    plot.add_glyph(source, glyph)

    # Numeric labels, offset past the ticks in the given direction and
    # rotated to stay tangent to the arc.
    x, y = zip(*[ polar_to_cartesian(radius+2*length*direction, angle) for angle in major_angles ])
    text_angles = [ angle - pi/2 for angle in major_angles ]
    source = ColumnDataSource(dict(x=x, y=y, angle=text_angles, text=major_labels))

    glyph = Text(x="x", y="y", angle="angle", text="text", text_align="center", text_baseline="middle")
    plot.add_glyph(source, glyph)
# Red outer scale in km/h (ticks outward), blue inner scale in mph (inward).
add_gauge(0.75, max_kmh, 0.05, +1, "red", major_step, minor_step)
add_gauge(0.70, max_mph, 0.05, -1, "blue", major_step, minor_step)

# Point the needle at 55 km/h.
add_needle(55, "kmh")

doc = Document()
doc.add_root(plot)

if __name__ == "__main__":
    # Render the document to a standalone HTML file and open it.
    filename = "gauges.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Gauges"))
    print("Wrote %s" % filename)
    view(filename)
| bsd-3-clause |
m1093782566/openstack_org_ceilometer | ceilometer/tests/test_neutronclient.py | 6 | 7952 | # Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import base
from ceilometer import neutron_client
class TestNeutronClient(base.BaseTestCase):
    """Tests for the ceilometer.neutron_client.Client wrapper.

    Each test stubs one ``list_*`` / ``retrieve_*`` method of the underlying
    python-neutronclient with a canned payload and checks that the wrapper
    unwraps the response envelope ('ports', 'networks', ...) correctly.
    NOTE(review): the ``1L``-style literals below are Python 2 longs; this
    module targets Python 2 only.
    """

    def setUp(self):
        super(TestNeutronClient, self).setUp()
        # System under test: the Ceilometer-side neutron client wrapper.
        self.nc = neutron_client.Client()

    @staticmethod
    def fake_ports_list():
        # Canned response mimicking neutronclient's list_ports().
        return {'ports':
                [{'admin_state_up': True,
                  'device_id': '674e553b-8df9-4321-87d9-93ba05b93558',
                  'device_owner': 'network:router_gateway',
                  'extra_dhcp_opts': [],
                  'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442',
                  'mac_address': 'fa:16:3e:c5:35:93',
                  'name': '',
                  'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
                  'status': 'ACTIVE',
                  'tenant_id': '89271fa581ab4380bf172f868c3615f9'},
                 ]}

    def test_port_get_all(self):
        # port_get_all() must unwrap the 'ports' envelope.
        with mock.patch.object(self.nc.client, 'list_ports',
                               side_effect=self.fake_ports_list):
            ports = self.nc.port_get_all()
            self.assertEqual(1, len(ports))
            self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442',
                             ports[0]['id'])

    @staticmethod
    def fake_networks_list():
        # Canned response mimicking neutronclient's list_networks().
        return {'networks':
                [{'admin_state_up': True,
                  'id': '298a3088-a446-4d5a-bad8-f92ecacd786b',
                  'name': 'public',
                  'provider:network_type': 'gre',
                  'provider:physical_network': None,
                  'provider:segmentation_id': 2,
                  'router:external': True,
                  'shared': False,
                  'status': 'ACTIVE',
                  'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'],
                  'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'},
                 ]}

    def test_network_get_all(self):
        # network_get_all() must unwrap the 'networks' envelope.
        with mock.patch.object(self.nc.client, 'list_networks',
                               side_effect=self.fake_networks_list):
            networks = self.nc.network_get_all()
            self.assertEqual(1, len(networks))
            self.assertEqual('298a3088-a446-4d5a-bad8-f92ecacd786b',
                             networks[0]['id'])

    @staticmethod
    def fake_pool_list():
        # Canned response mimicking neutronclient's list_pools() (LBaaS).
        return {'pools': [{'status': 'ACTIVE',
                           'lb_method': 'ROUND_ROBIN',
                           'protocol': 'HTTP',
                           'description': '',
                           'health_monitors': [],
                           'members': [],
                           'status_description': None,
                           'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                           'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                           'name': 'mylb',
                           'admin_state_up': True,
                           'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                           'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                           'health_monitors_status': []},
                          ]}

    def test_pool_list(self):
        # pool_get_all() must unwrap the 'pools' envelope.
        with mock.patch.object(self.nc.client, 'list_pools',
                               side_effect=self.fake_pool_list):
            pools = self.nc.pool_get_all()
            self.assertEqual(1, len(pools))
            self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a',
                             pools[0]['id'])

    @staticmethod
    def fake_vip_list():
        # Canned response mimicking neutronclient's list_vips() (LBaaS).
        return {'vips': [{'status': 'ACTIVE',
                          'status_description': None,
                          'protocol': 'HTTP',
                          'description': '',
                          'admin_state_up': True,
                          'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
                          'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                          'connection_limit': -1,
                          'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                          'session_persistence': None,
                          'address': '10.0.0.2',
                          'protocol_port': 80,
                          'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291',
                          'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                          'name': 'myvip'},
                         ]}

    def test_vip_list(self):
        # vip_get_all() must unwrap the 'vips' envelope.
        with mock.patch.object(self.nc.client, 'list_vips',
                               side_effect=self.fake_vip_list):
            vips = self.nc.vip_get_all()
            self.assertEqual(1, len(vips))
            self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1',
                             vips[0]['id'])

    @staticmethod
    def fake_member_list():
        # Canned response mimicking neutronclient's list_members() (LBaaS).
        return {'members': [{'status': 'ACTIVE',
                             'protocol_port': 80,
                             'weight': 1,
                             'admin_state_up': True,
                             'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
                             'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a',
                             'address': '10.0.0.3',
                             'status_description': None,
                             'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'},
                            ]}

    def test_member_list(self):
        # member_get_all() must unwrap the 'members' envelope.
        with mock.patch.object(self.nc.client, 'list_members',
                               side_effect=self.fake_member_list):
            members = self.nc.member_get_all()
            self.assertEqual(1, len(members))
            self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b',
                             members[0]['id'])

    @staticmethod
    def fake_monitors_list():
        # Canned response mimicking neutronclient's list_health_monitors().
        return {'health_monitors':
                [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365',
                  'admin_state_up': True,
                  'tenant_id': "d5d2817dae6b42159be9b665b64beb0e",
                  'delay': 2,
                  'max_retries': 5,
                  'timeout': 5,
                  'pools': [],
                  'type': 'PING',
                  }]}

    def test_monitor_list(self):
        # health_monitor_get_all() must unwrap the 'health_monitors' envelope.
        with mock.patch.object(self.nc.client, 'list_health_monitors',
                               side_effect=self.fake_monitors_list):
            monitors = self.nc.health_monitor_get_all()
            self.assertEqual(1, len(monitors))
            self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365',
                             monitors[0]['id'])

    @staticmethod
    def fake_pool_stats(fake_pool):
        # Canned response mimicking retrieve_pool_stats(); values are
        # Python 2 longs on purpose, matching the real client.
        return {'stats':
                [{'active_connections': 1L,
                  'total_connections': 2L,
                  'bytes_in': 3L,
                  'bytes_out': 4L
                  }]}

    def test_pool_stats(self):
        # pool_stats() passes the stats payload through unchanged.
        with mock.patch.object(self.nc.client, 'retrieve_pool_stats',
                               side_effect=self.fake_pool_stats):
            stats = self.nc.pool_stats('fake_pool')['stats']
            self.assertEqual(1, len(stats))
            self.assertEqual(1L, stats[0]['active_connections'])
            self.assertEqual(2L, stats[0]['total_connections'])
            self.assertEqual(3L, stats[0]['bytes_in'])
            self.assertEqual(4L, stats[0]['bytes_out'])
keenon/jamr | scripts/smatch_v1_0/smatch_orig.py | 7 | 26821 | #!/usr/bin/env python
# encoding: utf-8
"""
smatch.py
Author: Shu Cai
Copyright(c) 2012. All rights reserved.
"""
import sys
import os
import time
import random
import amr
#import optparse
#import argparse #argparse only works for python 2.7. If you are using older versin of Python, you can use optparse instead.
iter_num=5 # total number of random-restart iterations for the hill-climbing search
verbose=False # when True, print debugging details of the matching process
single_score=True # when True, report one document-level score instead of one per AMR pair
pr_flag=False # when True, also report precision and recall alongside the f-score
ERROR_LOG=sys.stderr # stream used for warnings and error messages
match_num_dict={} # memo cache; key: match number tuples, value: the matching triple count
def get_amr_line(input_f):
    """Read the next AMR from the file; AMRs are separated by a blank line.

    Returns the AMR's lines stripped and joined into a single string, or ""
    when the file holds no further AMR.
    """
    collected = []
    seen_content = False
    for raw_line in input_f:
        if raw_line[0] == "(" and len(collected) != 0:
            # A new top-level AMR began without a separating blank line:
            # discard what was collected so far and start over.
            collected = []
        stripped = raw_line.strip()
        if stripped == "":
            if not seen_content:
                # Skip leading blank lines before the AMR starts.
                continue
            # A blank line after content terminates the current AMR.
            break
        if stripped.startswith("#"):
            # omit the comment in the AMR file
            continue
        seen_content = True
        collected.append(stripped)
    return "".join(collected)
def build_arg_parser():
    """Build an argument parser using argparse.

    Returns:
        argparse.ArgumentParser configured with the smatch options
        (-f is required; -r defaults to 4; -v/--ms/--pr default to False).
    """
    # Fix: the module-level `import argparse` is commented out (argparse
    # needs Python >= 2.7), so without this local import calling the
    # function raised NameError.
    import argparse
    parser = argparse.ArgumentParser(description="Smatch calculator -- arguments")
    parser.add_argument('-f', nargs=2, required=True, type=argparse.FileType('r'),
                        help='Two files containing AMR pairs. AMRs in each file are separated by a single blank line')
    parser.add_argument('-r', type=int, default=4, help='Restart number (Default:4)')
    parser.add_argument('-v', action='store_true', help='Verbose output (Default:False)')
    parser.add_argument('--ms', action='store_true', default=False,
                        help='Output multiple scores (one AMR pair a score) instead of a single document-level smatch score (Default: False)')
    parser.add_argument('--pr', action='store_true', default=False,
                        help="Output precision and recall as well as the f-score. Default: false")
    return parser
def build_arg_parser2():
    """Build an argument parser using optparse (fallback for Python < 2.7).

    Returns:
        optparse.OptionParser mirroring build_arg_parser()'s options, with
        defaults r=4, v=False, ms=False, pr=False.
    """
    # Fix: the module-level `import optparse` is commented out, so without
    # this local import calling the function raised NameError.
    import optparse
    usage_str = "Smatch calculator -- arguments"
    parser = optparse.OptionParser(usage=usage_str)
    parser.add_option("-f", "--files", nargs=2, dest="f", type="string",
                      help='Two files containing AMR pairs. AMRs in each file are separated by a single blank line. This option is required.')
    parser.add_option("-r", "--restart", dest="r", type="int", help='Restart number (Default: 4)')
    parser.add_option("-v", "--verbose", action='store_true', dest="v", help='Verbose output (Default:False)')
    parser.add_option("--ms", "--multiple_score", action='store_true', dest="ms",
                      help='Output multiple scores (one AMR pair a score) instead of a single document-level smatch score (Default: False)')
    parser.add_option('--pr', "--precision_recall", action='store_true', dest="pr",
                      help="Output precision and recall as well as the f-score. Default: false")
    parser.set_defaults(r=4, v=False, ms=False, pr=False)
    return parser
def compute_pool(test_instance, test_relation1, test_relation2,
                 gold_instance, gold_relation1, gold_relation2,
                 test_label, gold_label):
    """Compute the candidate variable matches (those that may score 1).

    Args:
        test_instance: instance triples in AMR 1
        test_relation1: relation triples with one variable in AMR 1
        test_relation2: relation triples with two variables in AMR 1
        gold_instance: instance triples in AMR 2
        gold_relation1: relation triples with one variable in AMR 2
        gold_relation2: relation triples with two variables in AMR 2
        test_label: variable prefix of AMR 1, e.g. "a" (a0, a1, ...)
        gold_label: variable prefix of AMR 2, e.g. "b" (b0, b1, ...)
    Returns:
        (candidate_match, weight_dict) where candidate_match[i] is the set
        of gold variable numbers that test variable i may map to, and
        weight_dict[(i, j)] records how many triples each pair match
        contributes (key -1 holds the pair's own count; a tuple key holds
        the count that additionally requires that second pair to hold).
    """
    candidate_match = [set() for _ in range(len(test_instance))]
    weight_dict = {}

    def var_num(name, label):
        # Variable names are <label><number>; recover the number.
        return int(name[len(label):])

    def add_unary(pair):
        # A triple matched by a single variable pair.
        if pair in weight_dict:
            weight_dict[pair][-1] += 1
        else:
            weight_dict[pair] = {-1: 1}

    def add_binary(pair, other):
        # A triple whose match also requires *other* to hold.
        if pair in weight_dict:
            if other in weight_dict[pair]:
                weight_dict[pair][other] += 1
            else:
                weight_dict[pair][other] = 1
        else:
            weight_dict[pair] = {-1: 0, other: 1}

    # Instance triples: concept (field 2) and relation name must agree.
    for t in test_instance:
        for g in gold_instance:
            if t[0].lower() == g[0].lower() and t[2].lower() == g[2].lower():
                v1 = var_num(t[1], test_label)
                v2 = var_num(g[1], gold_label)
                candidate_match[v1].add(v2)
                add_unary((v1, v2))

    # One-variable relation triples: same matching rule as instances.
    for t in test_relation1:
        for g in gold_relation1:
            if t[0].lower() == g[0].lower() and t[2].lower() == g[2].lower():
                v1 = var_num(t[1], test_label)
                v2 = var_num(g[1], gold_label)
                candidate_match[v1].add(v2)
                add_unary((v1, v2))

    # Two-variable relation triples: only the relation name must agree;
    # the match depends on both endpoint variable pairs.
    for t in test_relation2:
        for g in gold_relation2:
            if t[0].lower() == g[0].lower():
                k1 = (var_num(t[1], test_label), var_num(g[1], gold_label))
                k2 = (var_num(t[2], test_label), var_num(g[2], gold_label))
                candidate_match[k1[0]].add(k1[1])
                candidate_match[k2[0]].add(k2[1])
                if k1 != k2:
                    add_binary(k1, k2)
                    add_binary(k2, k1)
                else:
                    # Self-loop (cycle): counts as a single-pair match.
                    add_unary(k1)
    return (candidate_match, weight_dict)
def init_match(candidate_match,test_instance,gold_instance):
    """Initialize a variable mapping, preferring exact concept-word matches.

    Args:
       candidate_match: candidate variable match list (one set per test var)
       test_instance: instance triples of AMR 1
       gold_instance: instance triples of AMR 2
    Returns:
       initialized match result: a list where entry i is the gold variable
       number mapped to test variable i, or -1 for "unmapped".
    NOTE(review): the fallback phase draws candidates with random.randint,
    so results are nondeterministic when no word match exists.
    """
    random.seed()
    matched_dict={}      # gold variable numbers already claimed by some test variable
    result=[]
    no_word_match=[]     # test variables whose concept word matched nothing
    # Phase 1: greedy word match -- map each test variable to the first
    # unclaimed gold candidate whose concept word is identical.
    for i,c in enumerate(candidate_match):
        c2=list(c)
        if len(c2)==0:
            result.append(-1)
            continue
        #word in the test instance
        test_word=test_instance[i][2]
        for j,m_id in enumerate(c2):
            gold_word=gold_instance[int(m_id)][2]
            if test_word==gold_word:
                if int(m_id) not in matched_dict:
                    result.append(int(m_id))
                    matched_dict[int(m_id)]=1
                    break
        # If nothing was appended above, no word match was found for i.
        if len(result)==i:
            no_word_match.append(i)
            result.append(-1)
    # Phase 2: for the leftovers, pick a random unclaimed candidate;
    # if all random draws are claimed, fall back to the last remaining one.
    for i in no_word_match:
        c2=list(candidate_match[i])
        found=False
        while len(c2)!=1:
            rid=random.randint(0,len(c2)-1)
            if c2[rid] in matched_dict:
                c2.pop(rid)
            else:
                matched_dict[c2[rid]]=1
                result[i]=c2[rid]
                found=True
                break
        if not found:
            if c2[0] not in matched_dict:
                result[i]=c2[0]
                matched_dict[c2[0]]=1
    return result
def get_random_sol(candidate):
    """
    Generate a random variable mapping (used for restart iterations).

    Args:
       candidate: a list of sets; each set contains the candidate gold
       variable numbers of one test variable.
    Returns:
       a list where entry i is the gold variable mapped to test variable i,
       or -1 when no unclaimed candidate could be drawn.
    """
    random.seed()
    matched_dict={}      # gold variables already claimed in this solution
    result=[]
    for c in candidate:
        c2=list(c)
        found=False
        if len(c2)==0:
            result.append(-1)
            continue
        # Randomly draw candidates, discarding already-claimed ones, until
        # either an unclaimed one is found or only one candidate remains.
        while len(c2)!=1:
            rid=random.randint(0,len(c2)-1)
            if c2[rid] in matched_dict:
                c2.pop(rid)
            else:
                matched_dict[c2[rid]]=1
                result.append(c2[rid])
                found=True
                break
        if not found:
            # Last remaining candidate: take it if free, otherwise unmap.
            if c2[0] not in matched_dict:
                result.append(c2[0])
                matched_dict[c2[0]]=1
            else:
                result.append(-1)
    return result
def compute_match(match, weight_dict):
    """Compute the matching triple number of a variable mapping.

    Args:
        match: list mapping each test variable index to a gold variable
               number (-1 = unmapped); len(match) = number of test instances
        weight_dict: per-pair triple counts produced by compute_pool
    Returns:
        the number of matching triples under this mapping.
    Results are memoized in the module-level match_num_dict.
    Complexity: O(m*n), m = test instances, n = gold instances.
    """
    key = tuple(match)
    if key in match_num_dict:
        return match_num_dict[key]
    total = 0
    for i, m in enumerate(match):
        if m == -1:
            continue
        entry = weight_dict.get((i, m))
        if entry is None:
            continue
        total += entry[-1]
        # Pair-dependent triples: count each (pair, pair) edge once by
        # only looking at partners with a larger test index.
        for other, w in entry.items():
            if other == -1 or other[0] < i:
                continue
            if match[other[0]] == other[1]:
                total += w
    match_num_dict[key] = total
    return total
def move_gain(match,i,m,nm,weight_dict,match_num):
    """Compute the triple match number gain of a "move" operation.

    Args:
       match: current match list
       i: the remapped test variable
       m: its original gold mapping
       nm: its new gold mapping
       weight_dict: weight dictionary from compute_pool
       match_num: the current matching number
    Returns:
       the gain number (might be negative).
    The result for the new mapping is memoized in match_num_dict.
    """
    cur_m=(i,nm)
    old_m=(i,m)
    new_match=match[:]
    new_match[i]=nm
    # Reuse a previously computed score for the candidate mapping if any.
    if tuple(new_match) in match_num_dict:
        return match_num_dict[tuple(new_match)]-match_num
    gain=0
    # Add everything the new pair (i, nm) contributes under the current match.
    if cur_m in weight_dict:
        gain+=weight_dict[cur_m][-1]
        for k in weight_dict[cur_m]:
            if k==-1:
                continue
            elif match[k[0]]==k[1]:
                gain+=weight_dict[cur_m][k]
    # Subtract everything the old pair (i, m) contributed.
    if old_m in weight_dict:
        gain-=weight_dict[old_m][-1]
        for k in weight_dict[old_m]:
            if k==-1:
                continue
            elif match[k[0]]==k[1]:
                gain-=weight_dict[old_m][k]
    match_num_dict[tuple(new_match)]=match_num+gain
    return gain
def swap_gain(match,i,m,j,m2,weight_dict,match_num):
    """Compute the triple match number gain of a "swap" operation.

    Swapping exchanges the gold mappings of test variables i and j:
    (i,m),(j,m2) -> (i,m2),(j,m).

    Args:
       match: current match list
       i: position 1
       m: the original mapped variable of i
       j: position 2
       m2: the original mapped variable of j
       weight_dict: weight dictionary from compute_pool
       match_num: the current matching number
    Returns:
       the gain number (might be negative).
    The result for the new mapping is memoized in match_num_dict.
    """
    new_match=match[:]
    new_match[i]=m2
    new_match[j]=m
    gain=0
    cur_m=(i,m2)      # new pair at position i
    cur_m2=(j,m)      # new pair at position j
    old_m=(i,m)       # replaced pair at position i
    old_m2=(j,m2)     # replaced pair at position j
    # Add contributions of the new pair at i; the edge to the other new
    # pair is counted explicitly, so partners at position j are skipped
    # in the loop to avoid using j's stale mapping.
    if cur_m in weight_dict:
        gain+=weight_dict[cur_m][-1]
        if cur_m2 in weight_dict[cur_m]:
            gain+=weight_dict[cur_m][cur_m2]
        for k in weight_dict[cur_m]:
            if k==-1:
                continue
            elif k[0]==j:
                continue
            elif match[k[0]]==k[1]:
                gain+=weight_dict[cur_m][k]
    # Add contributions of the new pair at j (its edge to cur_m was
    # already counted above; partners at position i are skipped).
    if cur_m2 in weight_dict:
        gain+=weight_dict[cur_m2][-1]
        for k in weight_dict[cur_m2]:
            if k==-1:
                continue
            elif k[0]==i:
                continue
            elif match[k[0]]==k[1]:
                gain+=weight_dict[cur_m2][k]
    # Symmetrically subtract the contributions of the two replaced pairs.
    if old_m in weight_dict:
        gain-=weight_dict[old_m][-1]
        if old_m2 in weight_dict[old_m]:
            gain-=weight_dict[old_m][old_m2]
        for k in weight_dict[old_m]:
            if k==-1:
                continue
            elif k[0]==j:
                continue
            elif match[k[0]]==k[1]:
                gain-=weight_dict[old_m][k]
    if old_m2 in weight_dict:
        gain-=weight_dict[old_m2][-1]
        for k in weight_dict[old_m2]:
            if k==-1:
                continue
            elif k[0]==i:
                continue
            elif match[k[0]]==k[1]:
                gain-=weight_dict[old_m2][k]
    match_num_dict[tuple(new_match)]=match_num+gain
    return gain
def get_best_gain(match,candidate_match,weight_dict,gold_len,start_match_num):
    """One hill-climbing step: find the best single swap/move from *match*.

    Args:
       match: the current variable mapping
       candidate_match: the match candidates list
       weight_dict: the weight dictionary
       gold_len: the number of variables in file 2
       start_match_num: the current match number
    Returns:
       (best match number reachable in one operation, the resulting mapping).
    When the global `verbose` is set, each gain is cross-checked against a
    full recomputation and inconsistencies are reported on stderr.
    """
    largest_gain=0
    largest_match_num=0
    swap=True #True: best operation so far is a swap; False: a move
    change_list=[]   # [i, nm] for a move, [i, j] for a swap; [] = no improvement
    # Gold variables not used by the current mapping; only these are legal
    # move targets (a move must not create a duplicate gold assignment).
    unmatched_gold=set(range(0,gold_len))
    #O(gold_len)
    for m in match:
        if m in unmatched_gold:
            unmatched_gold.remove(m)
    unmatch_list=list(unmatched_gold)
    # Try every legal move (i,m) -> (i,nm).
    for i,m in enumerate(match):
        for nm in unmatch_list:
            if nm in candidate_match[i]:
                gain=move_gain(match,i,m,nm,weight_dict,start_match_num)
                if verbose:
                    new_match=match[:]
                    new_match[i]=nm
                    new_m_num=compute_match(new_match,weight_dict)
                    if new_m_num!=start_match_num+gain:
                        print >> sys.stderr, match, new_match
                        print >> sys.stderr, "Inconsistency in computing: move gain", start_match_num, gain, new_m_num
                if gain>largest_gain:
                    largest_gain=gain
                    change_list=[i,nm]
                    swap=False
                    largest_match_num=start_match_num+gain
    # Try every swap of two mapped positions.
    for i,m in enumerate(match):
        for j,m2 in enumerate(match):
            if i==j:
                continue
            new_match=match[:]
            new_match[i]=m2
            new_match[j]=m
            sw_gain=swap_gain(match,i,m,j,m2,weight_dict,start_match_num)
            if verbose:
                new_match=match[:]
                new_match[i]=m2
                new_match[j]=m
                new_m_num=compute_match(new_match,weight_dict)
                if new_m_num!=start_match_num+sw_gain:
                    print >> sys.stderr, match, new_match
                    print >> sys.stderr, "Inconsistency in computing: swap gain", start_match_num, sw_gain, new_m_num
            if sw_gain>largest_gain:
                largest_gain=sw_gain
                change_list=[i,j]
                swap=True
    # Apply the single best operation found (if any) to a copy of match.
    cur_match=match[:]
    largest_match_num=start_match_num+largest_gain
    if change_list!=[]:
        if swap:
            temp=cur_match[change_list[0]]
            cur_match[change_list[0]]=cur_match[change_list[1]]
            cur_match[change_list[1]]=temp
        else:
            cur_match[change_list[0]]=change_list[1]
    return (largest_match_num,cur_match)
def get_fh(test_instance,test_relation1,test_relation2,gold_instance,gold_relation1,gold_relation2,test_label,gold_label):
    """Search for the variable mapping maximizing the matching triple count.

    Runs `iter_num` restarts of hill climbing: the first start uses the
    word-match initialization, the rest use random initializations.

    Args:
       test_instance: instance triples of AMR 1
       test_relation1: relation triples of AMR 1 (one-variable)
       test_relation2: relation triples of AMR 1 (two-variable)
       gold_instance: instance triples of AMR 2
       gold_relation1: relation triples of AMR 2 (one-variable)
       gold_relation2: relation triples of AMR 2 (two-variable)
       test_label: variable prefix for AMR 1
       gold_label: variable prefix for AMR 2
    Returns:
       best_match: the variable mapping with the best matching triple number
       best_match_num: the highest matching number found
    """
    #compute candidate pool
    (candidate_match,weight_dict)=compute_pool(test_instance,test_relation1,test_relation2,gold_instance,gold_relation1,gold_relation2,test_label,gold_label)
    best_match_num=0
    best_match=[-1]*len(test_instance)
    for i in range(0,iter_num):
        if verbose:
            print >> sys.stderr,"Iteration",i
        if i==0:
            #smart initialization
            start_match=init_match(candidate_match,test_instance,gold_instance)
        else:
            #random initialization
            start_match=get_random_sol(candidate_match)
        #first match_num, and store the match in memory
        match_num=compute_match(start_match,weight_dict)
        if verbose:
            print >> sys.stderr, "starting point match num:",match_num
            print >> sys.stderr,"start match",start_match
        #hill-climbing
        (largest_match_num,cur_match)=get_best_gain(start_match,candidate_match,weight_dict,len(gold_instance),match_num)
        if verbose:
            print >> sys.stderr, "Largest match number after the hill-climbing", largest_match_num
        #hill-climbing until there will be no gain if we generate a new variable mapping
        while largest_match_num>match_num:
            match_num=largest_match_num
            (largest_match_num,cur_match)=get_best_gain(cur_match,candidate_match,weight_dict,len(gold_instance),match_num)
            if verbose:
                print >> sys.stderr, "Largest match number after the hill-climbing", largest_match_num
        # Keep the best local optimum over all restarts.
        if match_num>best_match_num:
            best_match=cur_match[:]
            best_match_num=match_num
    return (best_match,best_match_num)
#help of inst_list: record a0 location in the test_instance ...
def print_alignment(match, test_instance, gold_instance, flip=False):
    """Render a variable mapping as a human-readable alignment string.

    Args:
       match: current match, denoted by a list (-1 = unmapped)
       test_instance: instances of AMR 1
       gold_instance: instances of AMR 2
       flip: when True, print gold-test instead of test-gold
    Returns:
       space-separated "var(concept)-var(concept)" pairs, with "Null" for
       unmapped test variables.
    """
    def label(inst):
        # "a0(boy)" style rendering of one instance triple.
        return inst[1] + "(" + inst[2] + ")"

    parts = []
    for i, m in enumerate(match):
        left = label(test_instance[i])
        right = "Null" if m == -1 else label(gold_instance[m])
        if flip:
            left, right = right, left
        parts.append(left + "-" + right)
    return " ".join(parts)
def compute_f(match_num, test_num, gold_num):
    """Compute precision, recall and f-score from triple counts.

    Args:
       match_num: number of matching triples
       test_num: triple count of AMR 1
       gold_num: triple count of AMR 2
    Returns:
       (precision, recall, f_score) where precision = match_num/test_num,
       recall = match_num/gold_num and f_score is their harmonic mean;
       all zeros when either denominator is zero.
    """
    if test_num == 0 or gold_num == 0:
        return (0.00, 0.00, 0.00)
    precision = float(match_num) / float(test_num)
    recall = float(match_num) / float(gold_num)
    if (precision + recall) == 0:
        if verbose:
            print >> sys.stderr, "F-score:", "0.0"
        return (precision, recall, 0.00)
    f_score = 2 * precision * recall / (precision + recall)
    if verbose:
        print >> sys.stderr, "F-score:", f_score
    return (precision, recall, f_score)
def main(args):
    """Main function of the smatch calculation program.

    Reads AMR pairs from the two files in args.f, scores each pair with
    get_fh(), and prints either one score per pair (--ms) or a single
    document-level score. Precision/recall are also printed under --pr.
    """
    global verbose
    global iter_num
    global single_score
    global pr_flag
    global match_num_dict
    #set the restart number
    iter_num=args.r+1
    verbose=False
    if args.ms:
        single_score=False
    if args.v:
        verbose=True
    if args.pr:
        pr_flag=True
    total_match_num=0
    total_test_num=0
    total_gold_num=0
    sent_num=1
    # Process both files in lock step, one AMR pair per loop iteration.
    while True:
        cur_amr1=get_amr_line(args.f[0])
        cur_amr2=get_amr_line(args.f[1])
        if cur_amr1=="" and cur_amr2=="":
            break
        if(cur_amr1==""):
            print >> sys.stderr, "Error: File 1 has less AMRs than file 2"
            print >> sys.stderr, "Ignoring remaining AMRs"
            break
        if(cur_amr2==""):
            print >> sys.stderr, "Error: File 2 has less AMRs than file 1"
            print >> sys.stderr, "Ignoring remaining AMRs"
            break
        amr1=amr.AMR.parse_AMR_line(cur_amr1)
        amr2=amr.AMR.parse_AMR_line(cur_amr2)
        # Rename the variables of the two AMRs to disjoint prefixes so that
        # variable numbers can be recovered by stripping the prefix.
        test_label="a"
        gold_label="b"
        amr1.rename_node(test_label)
        amr2.rename_node(gold_label)
        (test_inst,test_rel1,test_rel2)=amr1.get_triples2()
        (gold_inst,gold_rel1,gold_rel2)=amr2.get_triples2()
        if verbose:
            # NOTE(review): this first line goes to stdout while the rest of
            # the verbose trace goes to stderr -- looks unintentional.
            print "AMR pair",sent_num
            print >> sys.stderr,"Instance triples of AMR 1:",len(test_inst)
            print >> sys.stderr,test_inst
            print >> sys.stderr,"Relation triples of AMR 1:",len(test_rel1)+len(test_rel2)
            print >>sys.stderr,test_rel1
            print >> sys.stderr,test_rel2
            print >> sys.stderr,"Instance triples of AMR 2:",len(gold_inst)
            print >> sys.stderr,gold_inst
            print >> sys.stderr,"Relation triples of AMR 2:",len(gold_rel1)+len(gold_rel2)
            print >> sys.stderr,gold_rel1
            print >> sys.stderr,gold_rel2
        # Always search from the smaller variable set to the larger one.
        if len(test_inst)<len(gold_inst):
            (best_match,best_match_num)=get_fh(test_inst,test_rel1,test_rel2,gold_inst,gold_rel1,gold_rel2,test_label,gold_label)
            if verbose:
                print >> sys.stderr, "AMR pair ",sent_num
                print >> sys.stderr, "best match number",best_match_num
                print >> sys.stderr, "best match", best_match
                print >>sys.stderr,"Best Match:",print_alignment(best_match,test_inst,gold_inst)
        else:
            (best_match,best_match_num)=get_fh(gold_inst,gold_rel1,gold_rel2,test_inst,test_rel1,test_rel2,gold_label,test_label)
            if verbose:
                print >> sys.stderr, "Sent ",sent_num
                print >> sys.stderr, "best match number",best_match_num
                print >> sys.stderr, "best match", best_match
                print >>sys.stderr,"Best Match:",print_alignment(best_match,gold_inst,test_inst,True)
        if not single_score:
            # Per-sentence score requested via --ms.
            (precision,recall,best_f_score)=compute_f(best_match_num,len(test_rel1)+len(test_inst)+len(test_rel2),len(gold_rel1)+len(gold_inst)+len(gold_rel2))
            print "Sentence",sent_num
            if pr_flag:
                print "Precision: %.2f" % precision
                print "Recall: %.2f" % recall
            print "Smatch score: %.2f" % best_f_score
        total_match_num+=best_match_num
        total_test_num+=len(test_rel1)+len(test_rel2)+len(test_inst)
        total_gold_num+=len(gold_rel1)+len(gold_rel2)+len(gold_inst)
        # The memo cache is only valid for one AMR pair.
        match_num_dict.clear()
        sent_num+=1
    if verbose:
        print >> sys.stderr, "Total match num"
        print >> sys.stderr, total_match_num,total_test_num,total_gold_num
    if single_score:
        # Document-level score over the accumulated triple counts.
        (precision,recall,best_f_score)=compute_f(total_match_num,total_test_num,total_gold_num)
        if pr_flag:
            print "Precision: %.2f" % precision
            print "Recall: %.2f" % recall
        print "Document F-score: %.2f" % best_f_score
    args.f[0].close()
    args.f[1].close()
if __name__=="__main__":
    # Entry point: choose argparse (Python 2.7) or optparse (2.5/2.6),
    # validate the -f file arguments, then run the scorer.
    parser=None
    args=None
    if sys.version_info[:2]!=(2,7):
        # Pre-2.7 path: argparse is unavailable, fall back to optparse.
        if sys.version_info[0]!=2 or sys.version_info[1]<5:
            print >> ERROR_LOG, "Smatch only supports python 2.5 or later"
            exit(1)
        import optparse
        if len(sys.argv)==1:
            print >> ERROR_LOG,"No argument given. Please run smatch.py -h to see the argument descriptions."
            exit(1)
        #requires version >=2.3!
        parser=build_arg_parser2();
        (args,opts)=parser.parse_args()
        # optparse does not open files for us; validate and open them here
        # so args.f mirrors the argparse FileType behaviour.
        file_handle=[]
        if args.f==None:
            print >> ERROR_LOG, "smatch.py requires -f option to indicate two files containing AMR as input. Please run smatch.py -h to see the argument descriptions."
            exit(1)
        if not os.path.exists(args.f[0]):
            print >> ERROR_LOG,"Given file",args.f[0], "does not exist"
            exit(1)
        else:
            file_handle.append(open(args.f[0]))
        if not os.path.exists(args.f[1]):
            print >> ERROR_LOG,"Given file",args.f[1],"does not exist"
            exit(1)
        else:
            file_handle.append(open(args.f[1]))
        args.f=tuple(file_handle)
    else: #version 2.7
        import argparse
        parser=build_arg_parser()
        args=parser.parse_args()
    main(args)
| bsd-2-clause |
yosshy/bergenholm | bergenholm/database/hosts.py | 1 | 2857 | # -*- encoding:utf-8 -*-
import logging
import jinja2
from flask import abort
from flask import json
from bergenholm.database.groups import get_group_params
# Module-level state for the hosts database layer.
mongo = None  # Flask-PyMongo handle; presumably injected by the app at startup -- TODO confirm
jinja_env = jinja2.Environment()  # shared environment for rendering parameter templates
INSTALLED = "installed"  # group name marking a host as installed
UUID = "uuid"  # parameter key under which a host's uuid is exposed
def get_hosts(query=None):
    """Return the ids of all host documents matching *query*.

    Args:
        query: optional MongoDB filter document; None (the default)
               matches every host.
    Returns:
        {"hosts": [<_id>, ...]}
    """
    # A mutable default argument ({}) is shared across calls; use None as
    # the sentinel instead and build the empty filter per call.
    hosts = mongo.db.hosts.find(query if query is not None else {}, {'_id': 1})
    return {"hosts": [x["_id"] for x in hosts]}
def get_host(uuid):
    """Return the stored parameters of host *uuid* (404 if unknown).

    The internal MongoDB "_id" field is stripped from the result.
    """
    doc = mongo.db.hosts.find_one_or_404({'_id': uuid})
    doc.pop("_id", None)
    return doc
def create_host(uuid, params):
    """Create a host document keyed by *uuid*.

    Args:
        uuid: document id for the new host
        params: parameter dict to store (mutated: '_id' is added)
    Aborts with HTTP 400 when the host already exists or insertion fails.
    """
    if mongo.db.hosts.find_one({'_id': uuid}):
        abort(400)
    try:
        params['_id'] = uuid
        mongo.db.hosts.insert(params)
    except Exception:
        # Narrowed from a bare ``except:`` so that SystemExit and
        # KeyboardInterrupt are no longer swallowed; database errors
        # still map to a 400 response.
        abort(400)
def update_host(uuid, params):
    """Replace the document of host *uuid* with *params*.

    Aborts with HTTP 404 when the host does not exist and HTTP 400 when
    the update fails. Note this is a whole-document replacement, not a
    partial update.
    """
    mongo.db.hosts.find_one_or_404({'_id': uuid})
    try:
        mongo.db.hosts.update({'_id': uuid}, params)
    except Exception:
        # Narrowed from a bare ``except:``; see create_host.
        abort(400)
def delete_host(uuid):
    """Delete the document of host *uuid*.

    Aborts with HTTP 404 when the host does not exist and HTTP 400 when
    the removal fails.
    """
    mongo.db.hosts.find_one_or_404({'_id': uuid})
    try:
        mongo.db.hosts.remove({'_id': uuid})
    except Exception:
        # Narrowed from a bare ``except:``; see create_host.
        abort(400)
def render_string(temp_str, **params):
    """Render *temp_str* as a Jinja2 template with *params*.

    Strings are rendered directly; dicts and lists are rendered through a
    JSON round-trip so that every nested string is templated; any other
    value is returned unchanged.
    """
    if isinstance(temp_str, basestring):
        return jinja_env.from_string(temp_str).render(**params)
    if isinstance(temp_str, (dict, list)):
        rendered = jinja_env.from_string(json.dumps(temp_str)).render(**params)
        return json.loads(rendered)
    return temp_str
def get_host_params(uuid):
    """Resolve the effective parameter set for host *uuid*.

    Looks up the host document (falling back to the special 'register'
    document, then to an empty dict), merges in the parameters of its
    groups, and repeatedly renders every key and value as a Jinja2
    template until the result stops changing (at most 10 passes).
    """
    host_params = mongo.db.hosts.find_one({'_id': uuid})
    if host_params is None:
        # Unknown host: fall back to the registration defaults document.
        host_params = mongo.db.hosts.find_one({'_id': 'register'})
    if host_params is None:
        host_params = {}
    host_params.pop("_id", None)
    host_params[UUID] = uuid
    # Group parameters form the base; host-specific values override them.
    current_params, _groups = get_group_params(host_params.get('groups'))
    current_params.update(host_params)
    current_params["groups"] = _groups
    # Fixed-point template expansion: parameters may reference each other,
    # so re-render until stable (capped at 10 iterations to avoid cycles).
    for i in range(10):
        new_params = {}
        for k, v in current_params.iteritems():
            k = render_string(k, **current_params)
            v = render_string(v, **current_params)
            new_params[k] = v
        if new_params == current_params:
            break
        current_params = new_params
    logging.debug(current_params)
    return current_params
def mark_host_installed(uuid):
    """Add the 'installed' group to host *uuid*.

    Aborts with HTTP 404 when the host does not exist, HTTP 400 when it
    is already marked installed or when the update fails.
    """
    host = mongo.db.hosts.find_one_or_404({'_id': uuid})
    host.pop("_id", None)
    if INSTALLED in host["groups"]:
        abort(400)
    host["groups"].append(INSTALLED)
    try:
        mongo.db.hosts.update({'_id': uuid}, host)
    except Exception:
        # Narrowed from a bare ``except:``; see create_host.
        abort(400)
def unmark_host_installed(uuid):
    """Remove the 'installed' group from host *uuid*.

    Aborts with HTTP 404 when the host does not exist, HTTP 400 when it
    is not marked installed or when the update fails.
    """
    host = mongo.db.hosts.find_one_or_404({'_id': uuid})
    host.pop("_id", None)
    if INSTALLED not in host["groups"]:
        abort(400)
    host["groups"].remove(INSTALLED)
    try:
        mongo.db.hosts.update({'_id': uuid}, host)
    except Exception:
        # Narrowed from a bare ``except:``; see create_host.
        abort(400)
| apache-2.0 |
kurikaesu/arsenalsuite | cpp/lib/PyQt4/examples/itemviews/editabletreemodel/ui_mainwindow.py | 15 | 5202 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Fri Mar 27 22:12:50 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
    """UI scaffold for the editable-tree-model example window.

    Generated by pyuic4 from mainwindow.ui -- regenerate from the .ui file
    instead of editing by hand (changes here will be lost).
    """

    def setupUi(self, MainWindow):
        """Build the widget tree, menus and actions on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(573, 468)
        # Central widget: a single tree view in a zero-margin vbox layout.
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
        self.vboxlayout.setMargin(0)
        self.vboxlayout.setSpacing(0)
        self.vboxlayout.setObjectName("vboxlayout")
        self.view = QtGui.QTreeView(self.centralwidget)
        self.view.setAlternatingRowColors(True)
        self.view.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
        self.view.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
        self.view.setAnimated(False)
        self.view.setAllColumnsShowFocus(True)
        self.view.setObjectName("view")
        self.vboxlayout.addWidget(self.view)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with File and Actions menus, plus a status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 573, 31))
        self.menubar.setObjectName("menubar")
        self.fileMenu = QtGui.QMenu(self.menubar)
        self.fileMenu.setObjectName("fileMenu")
        self.actionsMenu = QtGui.QMenu(self.menubar)
        self.actionsMenu.setObjectName("actionsMenu")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Actions (texts and shortcuts are assigned in retranslateUi).
        self.exitAction = QtGui.QAction(MainWindow)
        self.exitAction.setObjectName("exitAction")
        self.insertRowAction = QtGui.QAction(MainWindow)
        self.insertRowAction.setObjectName("insertRowAction")
        self.removeRowAction = QtGui.QAction(MainWindow)
        self.removeRowAction.setObjectName("removeRowAction")
        self.insertColumnAction = QtGui.QAction(MainWindow)
        self.insertColumnAction.setObjectName("insertColumnAction")
        self.removeColumnAction = QtGui.QAction(MainWindow)
        self.removeColumnAction.setObjectName("removeColumnAction")
        self.insertChildAction = QtGui.QAction(MainWindow)
        self.insertChildAction.setObjectName("insertChildAction")
        # Menu population.
        self.fileMenu.addAction(self.exitAction)
        self.actionsMenu.addAction(self.insertRowAction)
        self.actionsMenu.addAction(self.insertColumnAction)
        self.actionsMenu.addSeparator()
        self.actionsMenu.addAction(self.removeRowAction)
        self.actionsMenu.addAction(self.removeColumnAction)
        self.actionsMenu.addSeparator()
        self.actionsMenu.addAction(self.insertChildAction)
        self.menubar.addAction(self.fileMenu.menuAction())
        self.menubar.addAction(self.actionsMenu.menuAction())

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign translatable texts and keyboard shortcuts."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Editable Tree Model", None, QtGui.QApplication.UnicodeUTF8))
        self.fileMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
        self.actionsMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&Actions", None, QtGui.QApplication.UnicodeUTF8))
        self.exitAction.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
        self.exitAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
        self.insertRowAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Row", None, QtGui.QApplication.UnicodeUTF8))
        self.insertRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, R", None, QtGui.QApplication.UnicodeUTF8))
        self.removeRowAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Row", None, QtGui.QApplication.UnicodeUTF8))
        self.removeRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, R", None, QtGui.QApplication.UnicodeUTF8))
        self.insertColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Column", None, QtGui.QApplication.UnicodeUTF8))
        self.insertColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, C", None, QtGui.QApplication.UnicodeUTF8))
        self.removeColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Column", None, QtGui.QApplication.UnicodeUTF8))
        self.removeColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, C", None, QtGui.QApplication.UnicodeUTF8))
        self.insertChildAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Child", None, QtGui.QApplication.UnicodeUTF8))
        self.insertChildAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+N", None, QtGui.QApplication.UnicodeUTF8))
import editabletreemodel_rc
| gpl-2.0 |
ashray/VTK-EVM | Filters/General/Testing/Python/clipImage.py | 20 | 1534 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline
#
# Read a single slice (z index 45) of the 16-bit "headsq" quarter volume.
v16 = vtk.vtkVolume16Reader()
v16.SetDataDimensions(64,64)
v16.GetOutput().SetOrigin(0.0,0.0,0.0)
v16.SetDataByteOrderToLittleEndian()
v16.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
v16.SetImageRange(45,45)
v16.SetDataSpacing(3.2,3.2,1.5)
v16.Update()
# do the pixel clipping
# Keep the region of the slice whose scalars exceed 1000; the clipped
# output is an unstructured grid, hence the data-set mapper below.
clip = vtk.vtkClipDataSet()
clip.SetInputConnection(v16.GetOutputPort())
clip.SetValue(1000)
clipMapper = vtk.vtkDataSetMapper()
clipMapper.SetInputConnection(clip.GetOutputPort())
clipMapper.ScalarVisibilityOff()
clipActor = vtk.vtkActor()
clipActor.SetMapper(clipMapper)
# put an outline around the data
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(v16.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.VisibilityOff()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(clipActor)
ren1.SetBackground(0,0,0)
renWin.SetSize(200,200)
iren.Initialize()
# render the image
#
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
PetrDlouhy/django | tests/timezones/tests.py | 4 | 54664 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
# pytz is optional; tests that need it are skipped when it is absent.
try:
    import pytz
except ImportError:
    pytz = None

# These tests use EAT (Eastern Africa Time) and ICT (Indochina Time),
# which do not observe Daylight Saving Time, so they can be represented
# as fixed offsets and used directly as tzinfo in the constructors.

# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.

UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180)      # Africa/Nairobi (UTC+3)
ICT = timezone.get_fixed_timezone(420)      # Asia/Bangkok (UTC+7)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
    """
    Datetime storage and retrieval with USE_TZ=False (legacy mode).

    In this mode the database stores naive datetimes interpreted in local
    time; local time is Africa/Nairobi (EAT, UTC+3) via override_settings.
    """

    def test_naive_datetime(self):
        # A naive datetime round-trips through the database unchanged.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        # Microseconds survive the round-trip on backends that store them.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    def test_aware_datetime_in_local_timezone(self):
        # An aware datetime comes back naive; reattaching the local zone
        # recovers the original instant.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination actually never happens.
    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))

    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination is no longer possible since timezone support
    # was removed from the SQLite backend -- it didn't work.
    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_utc_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # django.db.backends.utils.typecast_dt will just drop the
        # timezone, so a round-trip in the database alters the data (!)
        # interpret the naive datetime in local time and you get a wrong value
        self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
        # interpret the naive datetime in original time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=UTC), dt)

    @skipUnlessDBFeature('supports_timezones')
    @skipIfDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # interpret the naive datetime in local time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=EAT), dt)

    # This combination is no longer possible since timezone support
    # was removed from the SQLite backend -- it didn't work.
    @skipUnlessDBFeature('supports_timezones')
    @skipUnlessDBFeature('needs_datetime_string_cast')
    def test_aware_datetime_in_other_timezone_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertIsNone(event.dt.tzinfo)
        # django.db.backends.utils.typecast_dt will just drop the
        # timezone, so a round-trip in the database alters the data (!)
        # interpret the naive datetime in local time and you get a wrong value
        self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
        # interpret the naive datetime in original time to get the correct value
        self.assertEqual(event.dt.replace(tzinfo=ICT), dt)

    @skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unspported(self):
        # NOTE(review): method name has a typo ("unspported"); kept as-is
        # since test names are discovered by the runner.
        # Saving an aware datetime must fail when the backend can't store it.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        with self.assertRaises(ValueError):
            Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
    def test_query_filter(self):
        # Comparison lookups against naive datetimes.
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    def test_query_datetime_lookups(self):
        # Component lookups (year/month/day/...) on naive datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    def test_query_datetimes(self):
        # QuerySet.datetimes() truncates to each requested granularity.
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'year'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'month'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'day'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'hour'),
            [datetime.datetime(2011, 1, 1, 1, 0, 0),
             datetime.datetime(2011, 1, 1, 4, 0, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'minute'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0),
             datetime.datetime(2011, 1, 1, 4, 30, 0)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'second'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0),
             datetime.datetime(2011, 1, 1, 4, 30, 0)],
            transform=lambda d: d)

    def test_raw_sql(self):
        # Regression test for #17755
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    def test_filter_date_field_with_aware_datetime(self):
        # Regression test for #17742
        day = datetime.date(2011, 9, 1)
        AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        # With USE_TZ=False the tzinfo is ignored, so the date matches.
        self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
    """
    Datetime storage and retrieval with USE_TZ=True: aware datetimes are
    the canonical representation; naive input triggers a warning and is
    interpreted in local time (Africa/Nairobi, EAT).
    """

    @requires_tz_support
    def test_naive_datetime(self):
        # Saving a naive datetime warns and interprets it in local time.
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    def test_datetime_from_date(self):
        # A date assigned to a DateTimeField also triggers the warning.
        dt = datetime.date(2011, 9, 1)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))

    @requires_tz_support
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(tzinfo=EAT))

    @requires_tz_support
    @skipIfDBFeature('supports_microsecond_precision')
    def test_naive_datetime_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            Event.objects.create(dt=dt)
            self.assertEqual(len(recorded), 1)
            msg = str(recorded[0].message)
            self.assertTrue(msg.startswith("DateTimeField Event.dt received "
                                           "a naive datetime"))
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        # naive datetimes are interpreted in local time
        self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))

    def test_aware_datetime_in_local_timezone(self):
        # Aware datetimes round-trip exactly (stored as UTC internally).
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    @skipIfDBFeature('supports_microsecond_precision')
    def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        # microseconds are lost during a round-trip in the database
        self.assertEqual(event.dt, dt.replace(microsecond=0))

    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)

    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        Event.objects.create(dt=dt)
        event = Event.objects.get()
        self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
    def test_query_filter(self):
        # Comparison lookups against aware datetimes.
        dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt1)
        Event.objects.create(dt=dt2)
        self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
        self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)

    @skipIf(pytz is None, "this test requires pytz")
    def test_query_filter_with_pytz_timezones(self):
        tz = pytz.timezone('Europe/Paris')
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
        Event.objects.create(dt=dt)
        next = dt + datetime.timedelta(seconds=3)
        prev = dt - datetime.timedelta(seconds=3)
        self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
        self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
        self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
        self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)

    @requires_tz_support
    def test_query_filter_with_naive_datetime(self):
        # Filtering with a naive datetime warns once per lookup and
        # interprets the value in local time.
        dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
        Event.objects.create(dt=dt)
        dt = dt.replace(tzinfo=None)
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            # naive datetimes are interpreted in local time
            self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
            self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
            self.assertEqual(len(recorded), 3)
            for warning in recorded:
                msg = str(warning.message)
                self.assertTrue(msg.startswith("DateTimeField Event.dt "
                                               "received a naive datetime"))

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups(self):
        # Component lookups are computed in the current (local) time zone.
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
        self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
        self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
        self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
        self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
        self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetime_lookups_in_other_timezone(self):
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            # These two dates fall in the same day in EAT, but in different days,
            # years and months in UTC.
            self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
            self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
            self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
            self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
            self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
            self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)

    def test_query_aggregation(self):
        # Only min and max make sense for datetimes.
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
        result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
        self.assertEqual(result, {
            'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
            'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
        })

    def test_query_annotation(self):
        # Only min and max make sense for datetimes.
        morning = Session.objects.create(name='morning')
        afternoon = Session.objects.create(name='afternoon')
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
        SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
        morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
        afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
            [morning_min_dt, afternoon_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
            [morning_min_dt],
            transform=lambda d: d.dt)
        self.assertQuerysetEqual(
            Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
            [afternoon_min_dt],
            transform=lambda d: d.dt)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes(self):
        # datetimes() truncates in the current time zone and returns
        # aware values.
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'year'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'month'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'day'),
            [datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'hour'),
            [datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'minute'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)
        self.assertQuerysetEqual(
            Event.objects.datetimes('dt', 'second'),
            [datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
             datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
            transform=lambda d: d)

    @skipUnlessDBFeature('has_zoneinfo_database')
    def test_query_datetimes_in_other_timezone(self):
        # Switching the active time zone changes the truncation results.
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
        Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
        with timezone.override(UTC):
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'year'),
                [datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'month'),
                [datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'day'),
                [datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'hour'),
                [datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'minute'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)
            self.assertQuerysetEqual(
                Event.objects.datetimes('dt', 'second'),
                [datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
                 datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
                transform=lambda d: d)

    def test_raw_sql(self):
        # Regression test for #17755
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        event = Event.objects.create(dt=dt)
        self.assertQuerysetEqual(
            Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
            [event],
            transform=lambda d: d)

    @requires_tz_support
    def test_filter_date_field_with_aware_datetime(self):
        # Regression test for #17742
        day = datetime.date(2011, 9, 1)
        AllDayEvent.objects.create(day=day)
        # This is 2011-09-02T01:30:00+03:00 in EAT
        dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
        # With USE_TZ=True the aware value is converted to local time
        # first, so it falls on Sept 2 and the filter does not match.
        self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())

    def test_null_datetime(self):
        # Regression test for #17294
        e = MaybeEvent.objects.create()
        self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
    """
    Serialization and deserialization of naive and aware datetimes in the
    python, json, xml and yaml formats.
    """

    # Backend-specific notes:
    # - JSON supports only milliseconds, microseconds will be truncated.
    # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
    #   returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
    # Tests are adapted to take these quirks into account.

    def assert_python_contains_datetime(self, objects, dt):
        # `objects` is the output of serializers.serialize('python', ...).
        self.assertEqual(objects[0]['fields']['dt'], dt)

    def assert_json_contains_datetime(self, json, dt):
        self.assertIn('"fields": {"dt": "%s"}' % dt, json)

    def assert_xml_contains_datetime(self, xml, dt):
        field = parseString(xml).getElementsByTagName('field')[0]
        self.assertXMLEqual(field.childNodes[0].wholeText, dt)

    def assert_yaml_contains_datetime(self, yaml, dt):
        # Depending on the yaml dumper, '!timestamp' might be absent
        six.assertRegex(self, yaml,
            r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))

    def test_naive_datetime(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_naive_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
        obj = next(serializers.deserialize('json', data)).object
        # JSON keeps only milliseconds (see class notes above).
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt, dt)

    def test_aware_datetime_with_microsecond(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt.replace(microsecond=405000))
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            # PyYAML loads aware datetimes as naive UTC (see class notes).
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_utc(self):
        dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_local_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)

    def test_aware_datetime_in_other_timezone(self):
        dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
        data = serializers.serialize('python', [Event(dt=dt)])
        self.assert_python_contains_datetime(data, dt)
        obj = next(serializers.deserialize('python', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('json', [Event(dt=dt)])
        self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('json', data)).object
        self.assertEqual(obj.dt, dt)
        data = serializers.serialize('xml', [Event(dt=dt)])
        self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
        obj = next(serializers.deserialize('xml', data)).object
        self.assertEqual(obj.dt, dt)
        if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
            data = serializers.serialize('yaml', [Event(dt=dt)])
            self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
            obj = next(serializers.deserialize('yaml', data)).object
            self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
    """Template tags and filters from the `tz` template library."""

    @requires_tz_support
    def test_localtime_templatetag_and_filters(self):
        """
        Test the {% localtime %} templatetag and related filters.
        """
        # One representative datetime per zone, plus a naive one.
        datetimes = {
            'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
            'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
            'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
            'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
        }
        # Each template renders dt four ways: raw, |localtime, |utc,
        # |timezone:ICT -- with/without a surrounding {% localtime %} block.
        templates = {
            'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
            'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
            'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
        }

        # Transform a list of keys in 'datetimes' to the expected template
        # output. This makes the definition of 'results' more readable.
        def t(*result):
            return '|'.join(datetimes[key].isoformat() for key in result)

        # Results for USE_TZ = True
        results = {
            'utc': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('utc', 'eat', 'utc', 'ict'),
            },
            'eat': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('eat', 'eat', 'utc', 'ict'),
            },
            'ict': {
                'notag': t('eat', 'eat', 'utc', 'ict'),
                'noarg': t('eat', 'eat', 'utc', 'ict'),
                'on': t('eat', 'eat', 'utc', 'ict'),
                'off': t('ict', 'eat', 'utc', 'ict'),
            },
            'naive': {
                'notag': t('naive', 'eat', 'utc', 'ict'),
                'noarg': t('naive', 'eat', 'utc', 'ict'),
                'on': t('naive', 'eat', 'utc', 'ict'),
                'off': t('naive', 'eat', 'utc', 'ict'),
            }
        }
        for k1, dt in six.iteritems(datetimes):
            for k2, tpl in six.iteritems(templates):
                ctx = Context({'dt': dt, 'ICT': ICT})
                actual = tpl.render(ctx)
                expected = results[k1][k2]
                self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))

        # Changes for USE_TZ = False: only the untagged raw output differs
        # (no implicit conversion to local time).
        results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
        results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
        with self.settings(USE_TZ=False):
            for k1, dt in six.iteritems(datetimes):
                for k2, tpl in six.iteritems(templates):
                    ctx = Context({'dt': dt, 'ICT': ICT})
                    actual = tpl.render(ctx)
                    expected = results[k1][k2]
                    self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
    @skipIf(pytz is None, "this test requires pytz")
    def test_localtime_filters_with_pytz(self):
        """
        Test the |localtime, |utc, and |timezone filters with pytz.
        """
        # Use a pytz timezone as local time
        tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
        with self.settings(TIME_ZONE='Europe/Paris'):
            self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
        # Use a pytz timezone as argument
        tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
        # Use a pytz timezone name as argument
        tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
        # NOTE(review): the 'tz' context key is unused here -- the template
        # hard-codes the zone name; looks like a copy-paste leftover.
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")

    def test_localtime_templatetag_invalid_argument(self):
        # Anything other than on/off after {% localtime %} is a syntax error.
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()

    def test_localtime_filters_do_not_raise_exceptions(self):
        """
        Test the |localtime, |utc, and |timezone filters on bad inputs.
        """
        tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
        with self.settings(USE_TZ=True):
            # bad datetime value
            ctx = Context({'dt': None, 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "None|||")
            ctx = Context({'dt': 'not a date', 'tz': ICT})
            self.assertEqual(tpl.render(ctx), "not a date|||")
            # bad timezone value
            tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
            self.assertEqual(tpl.render(ctx), "")
            ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
            self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
    @skipIf(pytz is None, "this test requires pytz")
    def test_timezone_templatetag_with_pytz(self):
        """
        Test the {% timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
        # Use a pytz timezone as argument
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': pytz.timezone('Europe/Paris')})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
        # Use a pytz timezone name as argument; the tag resolves the string
        # to a tzinfo itself.
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
                       'tz': 'Europe/Paris'})
        self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
    def test_timezone_templatetag_invalid_argument(self):
        # Missing argument fails at parse time; an unknown zone name fails at
        # render time (pytz raises UnknownTimeZoneError, otherwise ValueError).
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
        with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
            Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_get_current_timezone_templatetag(self):
        """
        Test the {% get_current_timezone %} templatetag.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        # With pytz the zone renders by name; without it, by abbreviation.
        self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context()), "UTC")
        # The assignment escapes the {% timezone %} block: the variable keeps
        # the zone that was current inside the block.
        tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
        with timezone.override(UTC):
            self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
    @skipIf(pytz is None, "this test requires pytz")
    def test_get_current_timezone_templatetag_with_pytz(self):
        """
        Test the {% get_current_timezone %} templatetag with pytz.
        """
        tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
        with timezone.override(pytz.timezone('Europe/Paris')):
            self.assertEqual(tpl.render(Context()), "Europe/Paris")
        # A zone name passed to {% timezone %} is also reflected back.
        tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
        self.assertEqual(tpl.render(Context()), "Europe/Paris")
    def test_get_current_timezone_templatetag_invalid_argument(self):
        # The tag requires the "as variable" clause; bare use must not parse.
        with self.assertRaises(TemplateSyntaxError):
            Template("{% load tz %}{% get_current_timezone %}").render()
    @skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
    def test_tz_template_context_processor(self):
        """
        Test the django.template.context_processors.tz template context processor.
        """
        tpl = Template("{{ TIME_ZONE }}")
        # Plain Context has no TIME_ZONE variable...
        context = Context()
        self.assertEqual(tpl.render(context), "")
        # ...but RequestContext with the tz processor injects the current zone.
        request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
        self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
    @requires_tz_support
    def test_date_and_time_template_filters(self):
        # |date and |time convert the aware value to the current time zone
        # before formatting (default zone +03:00, then ICT +07:00 crossing
        # into the next day).
        tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
    def test_date_and_time_template_filters_honor_localtime(self):
        # Inside {% localtime off %} the filters format the UTC value as-is,
        # regardless of the active time zone.
        tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
        self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
    def test_localtime_with_time_zone_setting_set_to_none(self):
        # Regression for #17274
        tpl = Template("{% load tz %}{{ dt }}")
        ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
        with self.settings(TIME_ZONE=None):
            # the actual value depends on the system time zone of the host,
            # so only the year can be asserted
            self.assertTrue(tpl.render(ctx).startswith("2011"))
    @requires_tz_support
    def test_now_template_tag_uses_current_time_zone(self):
        # Regression for #17343: {% now %} must format in the active time
        # zone ("O" prints the UTC offset).
        tpl = Template("{% now \"O\" %}")
        self.assertEqual(tpl.render(Context({})), "+0300")
        with timezone.override(ICT):
            self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
    """Form datetime handling with USE_TZ=False: values stay naive."""

    def test_form(self):
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        # No conversion: the naive input is kept verbatim.
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_non_existent_time(self):
        # 2011-03-27 02:30 does not exist in Europe/Paris (DST gap).
        form = EventForm({'dt': '2011-03-27 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_ambiguous_time(self):
        # 2011-10-30 02:30 occurs twice in Europe/Paris (DST fold).
        form = EventForm({'dt': '2011-10-30 02:30:00'})
        with timezone.override(pytz.timezone('Europe/Paris')):
            # this is obviously a bug
            self.assertTrue(form.is_valid())
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))

    def test_split_form(self):
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))

    def test_model_form(self):
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
    """Form datetime handling with USE_TZ=True: naive input is interpreted
    in the current time zone and stored as an aware UTC datetime."""

    @requires_tz_support
    def test_form(self):
        form = EventForm({'dt': '2011-09-01 13:20:30'})
        self.assertTrue(form.is_valid())
        # 13:20:30 +03:00 (default zone) == 10:20:30 UTC.
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    def test_form_with_other_timezone(self):
        form = EventForm({'dt': '2011-09-01 17:20:30'})
        with timezone.override(ICT):
            self.assertTrue(form.is_valid())
            # Interpreted in ICT (+07:00) because the override is active.
            self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    def test_form_with_explicit_timezone(self):
        form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime inputs formats don't allow providing a time zone.
        self.assertFalse(form.is_valid())

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_non_existent_time(self):
        # With USE_TZ=True a DST-gap value is properly rejected.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-03-27 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                ["2011-03-27 02:30:00 couldn't be interpreted in time zone "
                 "Europe/Paris; it may be ambiguous or it may not exist."])

    @skipIf(pytz is None, "this test requires pytz")
    def test_form_with_ambiguous_time(self):
        # With USE_TZ=True a DST-fold value is properly rejected.
        with timezone.override(pytz.timezone('Europe/Paris')):
            form = EventForm({'dt': '2011-10-30 02:30:00'})
            self.assertFalse(form.is_valid())
            self.assertEqual(form.errors['dt'],
                ["2011-10-30 02:30:00 couldn't be interpreted in time zone "
                 "Europe/Paris; it may be ambiguous or it may not exist."])

    @requires_tz_support
    def test_split_form(self):
        form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    @requires_tz_support
    def test_localized_form(self):
        form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
        with timezone.override(ICT):
            # The rendered widget shows the value in the active zone.
            self.assertIn("2011-09-01 17:20:30", str(form))

    @requires_tz_support
    def test_model_form(self):
        EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
        e = Event.objects.get()
        self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))

    @requires_tz_support
    def test_localized_model_form(self):
        form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
        with timezone.override(ICT):
            self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
                   PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
    """Admin changelist/change views render aware datetimes in the current
    time zone (default EAT, or an override such as ICT)."""

    @classmethod
    def setUpTestData(cls):
        # password = "secret" (pre-hashed so the SHA1 hasher accepts it)
        cls.u1 = User.objects.create(
            id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
            last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
            first_name='Super', last_name='User', email='super@example.com',
            is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
        )

    def setUp(self):
        self.client.login(username='super', password='secret')

    @requires_tz_support
    def test_changelist(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(EAT).isoformat())

    def test_changelist_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_event_changelist'))
        self.assertContains(response, e.dt.astimezone(ICT).isoformat())

    @requires_tz_support
    def test_change_editable(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
        # Editable fields split the value into separate date and time inputs.
        self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())

    def test_change_editable_in_other_timezone(self):
        e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
        self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
        self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())

    @requires_tz_support
    def test_change_readonly(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(EAT).isoformat())

    def test_change_readonly_in_other_timezone(self):
        Timestamp.objects.create()
        # re-fetch the object for backends that lose microseconds (MySQL)
        t = Timestamp.objects.get()
        with timezone.override(ICT):
            response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
        self.assertContains(response, t.created.astimezone(ICT).isoformat())
| bsd-3-clause |
TwolDE2/enigma2 | lib/python/Plugins/SystemPlugins/ConfigurationBackup/plugin.py | 42 | 8494 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.Console import Console
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.MenuList import MenuList
from Components.config import ConfigSelection, ConfigSubsection, KEY_LEFT, KEY_RIGHT, KEY_0, getConfigListEntry
from Components.ConfigList import ConfigList
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import *
from os import path, makedirs, listdir
from time import localtime
from datetime import date
# Set by Plugins() when the plugin is loaded; the screens use it as their
# skin search path (self.skin_path).
plugin_path = ""

# FIXME: harddiskmanager has a better overview about available mointpoints!
# Directory where backup archives are written, per config choice key.
BackupPath = {
    "mtd" : "/media/backup",
    "hdd" : "/media/hdd/backup",
    "usb" : "/media/usb/backup",
    "cf" : "/media/cf/backup"
}

# Mount point checked with path.ismount() before a backup is attempted.
MountPoints = {
    "mtd" : "/media/backup",
    "hdd" : "/media/hdd",
    "usb" : "/media/usb",
    "cf" : "/media/cf"
}
class BackupSetup(Screen):
    """Configuration screen: pick what to back up (settings/var/skin) and
    where (mtd/hdd/usb/cf), then run a tar-based backup or open the
    restore menu."""
    skin = """
        <screen position="135,144" size="450,300" title="Backup and Restore" >
        <widget name="config" position="10,10" size="430,240" />
        <widget name="cancel" position="10,255" size="100,40" pixmap="~/red.png" transparent="1" alphatest="on" />
        <widget name="canceltext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        <widget name="ok" position="120,255" size="100,40" pixmap="~/green.png" transparent="1" alphatest="on" />
        <widget name="oktext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        <widget name="restore" position="230,255" size="100,40" pixmap="~/yellow.png" transparent="1" alphatest="on" />
        <widget name="restoretext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        <widget name="backup" position="340,255" size="100,40" pixmap="~/blue.png" transparent="1" alphatest="on" />
        <widget name="backuptext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        </screen>"""

    def keyLeft(self):
        # Cycle the highlighted config entry to its previous choice.
        self["config"].handleKey(KEY_LEFT)

    def keyRight(self):
        # Cycle the highlighted config entry to its next choice.
        self["config"].handleKey(KEY_RIGHT)

    def keyNumberGlobal(self, number):
        print "You pressed number", number
        if (self["config"].getCurrent()[1].parent.enabled == True):
            self["config"].handleKey(KEY_0+number)

    def keyCancel(self):
        # Discard any pending config changes and close the screen.
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def keySave(self):
        # Persist the config changes and close the screen.
        for x in self["config"].list:
            x[1].save()
        self.close()

    def __init__(self, session, args = None):
        Screen.__init__(self, session)
        self.skin_path = plugin_path
        self["oktext"] = Label(_("OK"))
        self["canceltext"] = Label(_("Cancel"))
        self["backuptext"] = Label(_("Backup"))
        self["restoretext"] = Label(_("Restore"))
        self["restore"] = Pixmap()
        self["backup"] = Pixmap()
        self["ok"] = Pixmap()
        self["cancel"] = Pixmap()
        self.path = ""
        self.list = []
        self["config"] = ConfigList(self.list)
        self.createSetup()
        self["actions"] = NumberActionMap(["SetupActions"],
        {
            "ok": self.keySave,
            "cancel": self.keyCancel,
            "left": self.keyLeft,
            "right": self.keyRight
        }, -1)
        self["shortcuts"] = ActionMap(["ShortcutActions"],
        {
            "red": self.keyCancel,
            "green": self.keySave,
            "blue": self.Backup,
            "yellow": self.Restore,
        })

    def createSetup(self):
        # (Re)build the two-entry config list: backup mode and destination.
        print "Creating BackupSetup"
        self.list = [ ]
        self["config"] = ConfigList(self.list)
        self.backup = ConfigSubsection()
        self.backup.type = ConfigSelection(choices = [("settings", _("enigma2 and network")), ("var", _("/var directory")), ("skin", _("/usr/share/enigma2 directory"))], default="settings")
        self.backup.location = ConfigSelection(choices = [("mtd", _("Backup")), ("hdd", _("Harddisk")), ("usb", _("USB Stick")), ("cf", _("CF Drive"))])
        self.list.append(getConfigListEntry(_("Backup Mode"), self.backup.type))
        self.list.append(getConfigListEntry(_("Backup Location"), self.backup.location))

    def createBackupfolders(self):
        # Ensure the destination backup directory exists.
        self.path = BackupPath[self.backup.location.value]
        print "Creating Backup Folder if not already there..."
        if (path.exists(self.path) == False):
            makedirs(self.path)

    def Backup(self):
        # Ask for confirmation; runBackup() is invoked with the answer.
        print "this will start the backup now!"
        self.session.openWithCallback(self.runBackup, MessageBox, _("Do you want to backup now?\nAfter pressing OK, please wait!"))

    def Restore(self):
        print "this will start the restore now!"
        self.session.open(RestoreMenu, self.backup)

    def runBackup(self, result):
        # Called back from the confirmation MessageBox; result is truthy on OK.
        if result:
            # Only proceed when the chosen destination is actually mounted.
            if path.ismount(MountPoints[self.backup.location.value]):
                self.createBackupfolders()
                d = localtime()
                dt = date(d.tm_year, d.tm_mon, d.tm_mday)
                self.path = BackupPath[self.backup.location.value]
                # Archive name is prefixed with today's date (YYYY-MM-DD).
                if self.backup.type.value == "settings":
                    print "Backup Mode: Settings"
                    self.session.open(Console, title = "Backup running", cmdlist = ["tar -czvf " + self.path + "/" + str(dt) + "_settings_backup.tar.gz /etc/enigma2/ /etc/network/interfaces /etc/wpa_supplicant.conf"])
                elif self.backup.type.value == "var":
                    print "Backup Mode: var"
                    self.session.open(Console, title = "Backup running", cmdlist = [ "tar -czvf " + self.path + "/" + str(dt) + "_var_backup.tar.gz /var/"])
                elif self.backup.type.value == "skin":
                    print "Backup Mode: skin"
                    self.session.open(Console, title ="Backup running", cmdlist = [ "tar -czvf " + self.path + "/" + str(dt) + "_skin_backup.tar.gz /usr/share/enigma2/"])
            else:
                self.session.open(MessageBox, _("Sorry your Backup destination does not exist\n\nPlease choose an other one."), MessageBox.TYPE_INFO)
class RestoreMenu(Screen):
    """Screen listing the *.tar.gz archives in the backup directory; the
    selected one is unpacked over / and enigma2 is killed so it restarts
    with the restored files."""
    skin = """
        <screen position="135,144" size="450,300" title="Restore Backups" >
        <widget name="filelist" position="10,10" size="430,240" scrollbarMode="showOnDemand" />
        <widget name="cancel" position="120,255" size="100,40" pixmap="~/red.png" transparent="1" alphatest="on" />
        <widget name="canceltext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        <widget name="restore" position="230,255" size="100,40" pixmap="~/yellow.png" transparent="1" alphatest="on" />
        <widget name="restoretext" position="0,0" size="0,0" valign="center" halign="center" zPosition="2" font="Regular;20" transparent="1" foregroundColor="black" />
        </screen>"""

    def __init__(self, session, backup):
        Screen.__init__(self, session)
        self.skin_path = plugin_path
        # backup is BackupSetup's ConfigSubsection (type/location choices).
        self.backup = backup
        self["canceltext"] = Label(_("Cancel"))
        self["restoretext"] = Label(_("Restore"))
        self["restore"] = Pixmap()
        self["cancel"] = Pixmap()
        self.sel = []
        self.val = []
        self.entry = False   # True once the file list has at least one entry
        self.exe = False     # True while a restore is running
        self.path = ""
        self["actions"] = NumberActionMap(["SetupActions"],
        {
            "ok": self.KeyOk,
            "cancel": self.keyCancel
        }, -1)
        self["shortcuts"] = ActionMap(["ShortcutActions"],
        {
            "red": self.keyCancel,
            "yellow": self.KeyOk,
        })
        self.flist = []
        self["filelist"] = MenuList(self.flist)
        self.fill_list()

    def fill_list(self):
        # Populate the menu with every *.tar.gz in the backup directory,
        # creating the directory if it does not exist yet.
        self.flist = []
        self.path = BackupPath[self.backup.location.value]
        if (path.exists(self.path) == False):
            makedirs(self.path)
        for file in listdir(self.path):
            if (file.endswith(".tar.gz")):
                self.flist.append((file))
                self.entry = True
                self["filelist"].l.setList(self.flist)

    def KeyOk(self):
        # Ignore input while a restore is running or when the list is empty.
        if (self.exe == False) and (self.entry == True):
            self.sel = self["filelist"].getCurrent()
            # NOTE(review): no "/" separator here, and self.val is never read
            # afterwards (startRestore builds the full path itself) -- looks
            # like dead code; confirm before removing.
            self.val = self.path + self.sel
            self.session.openWithCallback(self.startRestore, MessageBox, _("are you sure you want to restore\nfollowing backup:\n" + self.sel + "\nEnigma2 will restart after the restore"))

    def keyCancel(self):
        self.close()

    def startRestore(self, ret = False):
        # Confirmation callback: unpack the archive over / and kill enigma2
        # so the init system restarts it with the restored configuration.
        if (ret == True):
            self.exe = True
            self.session.open(Console, title = "Restore running", cmdlist = ["tar -xzvf " + self.path + "/" + self.sel + " -C /", "killall -9 enigma2"])

    def Exit(self):
        self.close()
def BackupMain(session, **kwargs):
    # Plugin entry point: open the backup/restore setup screen.
    session.open(BackupSetup)
def Plugins(path, **kwargs):
    # Enigma2 plugin registration hook; remember our install path so the
    # screens can locate their skin resources.
    global plugin_path
    plugin_path = path
    return PluginDescriptor(name="Backup/Restore", description="Backup and Restore your Settings", icon="backup.png", where = PluginDescriptor.WHERE_PLUGINMENU, fnc=BackupMain)
| gpl-2.0 |
dhirajpatra/mailboxapi | vendor/psy/psysh/test/tools/gen_unvis_fixtures.py | 536 | 3120 | #! /usr/bin/env python3
import sys
from os.path import abspath, expanduser, dirname, join
from itertools import chain
import json
import argparse
from vis import vis, unvis, VIS_WHITE
__dir__ = dirname(abspath(__file__))
# Default destination for the generated fixtures; overridable via --output-file.
OUTPUT_FILE = join(__dir__, '..', 'fixtures', 'unvis_fixtures.json')

# Add custom fixtures here
CUSTOM_FIXTURES = [
    # test long multibyte string
    ''.join(chr(cp) for cp in range(1024)),
    'foo bar',
    'foo\nbar',
    "$bar = 'baz';",
    r'$foo = "\x20\\x20\\\x20\\\\x20"',
    '$foo = function($bar) use($baz) {\n\treturn $baz->getFoo()\n};'
]

# Codepoint ranges selectable with --range (or --all for the full BMP).
# NOTE(review): these are one-shot chain() iterators -- fine here because a
# single run consumes exactly one of them once.
RANGES = {
    # All valid codepoints in the BMP
    'bmp': chain(range(0x0000, 0xD800), range(0xE000, 0xFFFF)),
    # Smaller set of pertinent? codepoints inside BMP
    # see: http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
    'small': chain(
        # latin blocks
        range(0x0000, 0x0250),
        # Greek, Cyrillic
        range(0x0370, 0x0530),
        # Hebrew, Arabic
        range(0x590, 0x0700),
        # CJK radicals
        range(0x2E80, 0x2F00),
        # Hiragana, Katakana
        range(0x3040, 0x3100)
    )
}
if __name__ == '__main__':
    argp = argparse.ArgumentParser(
        description='Generates test data for Psy\\Test\\Util\\StrTest')
    argp.add_argument('-f', '--format-output', action='store_true',
                      help='Indent JSON output to ease debugging')
    argp.add_argument('-a', '--all', action='store_true',
                      help="""Generates test data for all codepoints of the BMP.
                      (same as --range=bmp). WARNING: You will need quite
                      a lot of RAM to run the testsuite !
                      """)
    argp.add_argument('-r', '--range',
                      help="""Choose the range of codepoints used to generate
                      test data.""",
                      choices=list(RANGES.keys()),
                      default='small')
    argp.add_argument('-o', '--output-file',
                      help="""Write test data to OUTPUT_FILE
                      (defaults to PSYSH_DIR/test/fixtures)""")
    args = argp.parse_args()

    # --all wins over --range; otherwise default to the 'small' set.
    cp_range = RANGES['bmp'] if args.all else RANGES[args.range]
    indent = 2 if args.format_output else None
    if args.output_file:
        OUTPUT_FILE = abspath(expanduser(args.output_file))
    fixtures = []
    # use SMALL_RANGE by default, it should be enough.
    # use BMP_RANGE for a more complete smoke test
    for codepoint in cp_range:
        char = chr(codepoint)
        # Each fixture pairs the vis-encoded form with its decoded original
        # so the PHP side can verify round-tripping.
        encoded = vis(char, VIS_WHITE)
        decoded = unvis(encoded)
        fixtures.append((encoded, decoded))
    # Add our own custom fixtures at the end,
    # since they would fail anyway if one of the previous did.
    for fixture in CUSTOM_FIXTURES:
        encoded = vis(fixture, VIS_WHITE)
        decoded = unvis(encoded)
        fixtures.append((encoded, decoded))
    with open(OUTPUT_FILE, 'w') as fp:
        # dump as json to avoid backslashin and quotin nightmare
        # between php and python
        json.dump(fixtures, fp, indent=indent)
    sys.exit(0)
| gpl-3.0 |
youfoh/webkit-efl | Source/WebKit/chromium/scripts/inline_js_imports.py | 17 | 3145 | #!/usr/bin/env python
#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script replaces calls to importScripts with script sources
# in input script file and dumps result into output script file.
from cStringIO import StringIO
import jsmin
import os.path
import re
import sys
def main(argv):
    """Inline importScripts() calls and minify the result.

    Reads the input script, replaces every importScripts('file') call with
    the contents of that file (resolved against imports_dir), minifies the
    combined script with jsmin and writes it to the output file.

    Returns 0 on success, 1 on bad usage.
    """
    # argv[0] is the program name, so three positional arguments mean
    # len(argv) must be at least 4.  The original "< 3" check let a missing
    # output_file through and crashed with IndexError on argv[3] below.
    if len(argv) < 4:
        print('usage: %s input_file imports_dir output_file' % argv[0])
        return 1
    input_file_name = argv[1]
    imports_dir = argv[2]
    output_file_name = argv[3]
    with open(input_file_name, 'r') as input_file:
        input_script = input_file.read()

    def replace(match):
        # Substitute the whole importScripts(...) call with the referenced
        # file's contents; fail loudly if the file cannot be found.
        import_file_name = match.group(1)
        full_path = os.path.join(imports_dir, import_file_name)
        if not os.access(full_path, os.F_OK):
            raise Exception('File %s referenced in %s not found on any source paths, '
                            'check source tree for consistency' %
                            (import_file_name, input_file_name))
        with open(full_path, 'r') as import_file:
            return import_file.read()

    output_script = re.sub(r'importScripts\([\'"]([^\'"]+)[\'"]\)', replace, input_script)
    with open(output_file_name, 'w') as output_file:
        output_file.write(jsmin.jsmin(output_script))
    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(output_file_name)
        os.utime(output_dir_name, None)
# Script entry point: exit status comes from main().
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| lgpl-2.1 |
tliber/scrapy | scrapy/utils/gz.py | 57 | 1109 | import struct
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from gzip import GzipFile
def gunzip(data):
    """Gunzip the given data and return as much data as possible.

    This is resilient to CRC checksum errors: if decompression blows up
    partway through, whatever was already decoded (plus the decoder's
    leftover buffer) is returned instead of raising.
    """
    stream = GzipFile(fileobj=BytesIO(data))
    pieces = []
    while True:
        try:
            block = stream.read(8196)
        except (IOError, EOFError, struct.error):
            # Salvage what we have; only re-raise when nothing at all was
            # decoded (see issue 87 about catching struct.error - some
            # pages are tiny, so the content lives entirely in extrabuf).
            if pieces or stream.extrabuf:
                pieces.append(stream.extrabuf)
                break
            raise
        if not block:
            break
        pieces.append(block)
    return b''.join(pieces)
def is_gzipped(response):
    """Return True if the response is gzipped, or False otherwise.

    The Content-Type header may carry parameters (e.g.
    b"application/x-gzip;charset=utf-8"), so only the media type before any
    ";" is compared; the bare-header behaviour is unchanged.
    """
    ctype = response.headers.get('Content-Type', b'')
    mime_type = ctype.split(b';')[0].strip().lower()
    return mime_type in (b'application/x-gzip', b'application/gzip')
| bsd-3-clause |
ikotpk/android_kernel_samsung_vastoskt | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Operation codes / flags for the futex(2) syscall's "op" argument.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Masking with this strips the flag bits, leaving the bare command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Mean of a running total over n samples (truncating integer division
    # when both operands are ints under Python 2, which this script targets).
    return total / n
def nsecs(secs, nsecs):
    # Combine a (seconds, nanoseconds) pair into one nanosecond count.
    return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
    # Whole-seconds part of a nanosecond timestamp (integer division under
    # Python 2).
    return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
    # Sub-second remainder (in nanoseconds) of a nanosecond timestamp.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond timestamp as "seconds.nanoseconds".

    The previous version had a stray trailing comma after the formatting
    expression, so it built and returned a 1-tuple instead of a string (and
    shadowed the builtin ``str`` while doing so).  Returning a plain string
    stays compatible with the "%s" formatting the callers apply to it.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into the (min, max, avg, count) tuple at dict[key].

    The "avg" slot is a smoothed running value (previous average and the new
    sample weighted equally), not a true arithmetic mean; that behaviour is
    kept for compatibility with the report scripts that consume it.
    The old ``dict.has_key(key)`` test was Python-2-only; ``key not in dict``
    behaves identically and also works on Python 3.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, smoothed, count = dict[key]
        lo = min(lo, value)
        hi = max(hi, value)
        smoothed = (smoothed + value) / 2
        dict[key] = (lo, hi, smoothed, count + 1)
def clear_term():
    # ANSI escape sequence: cursor to home position, then erase the display.
    print("\x1b[H\x1b[2J")
# Optional dependency: the audit bindings translate syscall numbers to
# names.  If the package (or this machine's architecture mapping) is
# missing, warn once and let syscall_name() fall back to raw numbers.
audit_package_warned = False
try:
    import audit
    # Map uname machine strings to the audit library's architecture ids.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    # Translate a syscall number to its name via the audit bindings; if the
    # module (or machine_id) is unavailable, fall back to the bare number.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. "ENOENT") for *nr*.

    The sign of *nr* is ignored so both kernel-style negative returns and
    plain errno values work.  Unknown numbers yield a descriptive string.
    The previous bare ``except:`` also swallowed unrelated errors (e.g. a
    TypeError from a non-numeric argument); only the expected lookup miss
    is handled now.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
| gpl-2.0 |
apache/airflow | airflow/providers/plexus/operators/job.py | 3 | 6123 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import time
from typing import Any, Dict, Optional
import requests
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.plexus.hooks.plexus import PlexusHook
logger = logging.getLogger(__name__)
class PlexusJobOperator(BaseOperator):
    """
    Submits a Plexus job.

    :param job_params: parameters required to launch a job.
    :type job_params: dict

    Required job parameters are the following
        - "name": job name created by user.
        - "app": name of the application to run. found in Plexus UI.
        - "queue": public cluster name. found in Plexus UI.
        - "num_nodes": number of nodes.
        - "num_cores": number of cores per node.
    """

    def __init__(self, job_params: Dict, **kwargs) -> None:
        super().__init__(**kwargs)
        self.job_params = job_params
        # Parameters the user must supply; checked in construct_job_params().
        self.required_params = {"name", "app", "queue", "num_cores", "num_nodes"}
        # Per-parameter API lookup spec: endpoint, field to extract, and the
        # result field to match the user-supplied value against (None means
        # "take the first result unconditionally").
        self.lookups = {
            "app": ("apps/", "id", "name"),
            "billing_account_id": ("users/{}/billingaccounts/", "id", None),
            "queue": ("queues/", "id", "public_name"),
        }
        # Placeholder so the billing account is resolved via _api_lookup too.
        self.job_params.update({"billing_account_id": None})
        # Set as a side effect of the "app" lookup: True for a long-running
        # service, False for a batch job, None until the lookup has run.
        self.is_service = None

    def execute(self, context: Any) -> Any:
        # Create the job via the Plexus REST API, then poll its state every
        # 3 seconds until it reaches the expected terminal state.  Raises
        # AirflowException on creation failure, status-fetch failure, or a
        # Cancelled/Failed job.
        hook = PlexusHook()
        params = self.construct_job_params(hook)
        # Services count as done once Running, unless an expected runtime was
        # given (then wait for Finished); batch jobs must reach Completed.
        if self.is_service is True:
            if self.job_params.get("expected_runtime") is None:
                end_state = "Running"
            else:
                end_state = "Finished"
        elif self.is_service is False:
            end_state = "Completed"
        else:
            raise AirflowException(
                "Unable to determine if application "
                "is running as a batch job or service. "
                "Contact Core Scientific AI Team."
            )
        logger.info("creating job w/ following params: %s", params)
        jobs_endpoint = hook.host + "jobs/"
        headers = {"Authorization": f"Bearer {hook.token}"}
        create_job = requests.post(jobs_endpoint, headers=headers, data=params, timeout=5)
        if create_job.ok:
            job = create_job.json()
            jid = job["id"]
            state = job["last_state"]
            while state != end_state:
                time.sleep(3)
                jid_endpoint = jobs_endpoint + f"{jid}/"
                get_job = requests.get(jid_endpoint, headers=headers, timeout=5)
                if not get_job.ok:
                    raise AirflowException(
                        "Could not retrieve job status. Status Code: [{}]. "
                        "Reason: {} - {}".format(get_job.status_code, get_job.reason, get_job.text)
                    )
                new_state = get_job.json()["last_state"]
                if new_state in ("Cancelled", "Failed"):
                    raise AirflowException(f"Job {new_state}")
                elif new_state != state:
                    logger.info("job is %s", new_state)
                state = new_state
        else:
            raise AirflowException(
                "Could not start job. Status Code: [{}]. "
                "Reason: {} - {}".format(create_job.status_code, create_job.reason, create_job.text)
            )

    def _api_lookup(self, param: str, hook):
        # Resolve a user-facing value (e.g. an app name) to the backend id
        # the jobs endpoint expects, using the spec from self.lookups.
        lookup = self.lookups[param]
        key = lookup[1]
        mapping = None if lookup[2] is None else (lookup[2], self.job_params[param])
        if param == "billing_account_id":
            # This endpoint is scoped to the authenticated user.
            endpoint = hook.host + lookup[0].format(hook.user_id)
        else:
            endpoint = hook.host + lookup[0]
        headers = {"Authorization": f"Bearer {hook.token}"}
        response = requests.get(endpoint, headers=headers, timeout=5)
        results = response.json()["results"]
        v = None
        if mapping is None:
            v = results[0][key]
        else:
            for dct in results:
                if dct[mapping[0]] == mapping[1]:
                    v = dct[key]
                    if param == 'app':
                        # Remember whether the matched app is a service so
                        # execute() can pick the right terminal state.
                        self.is_service = dct['is_service']
        if v is None:
            raise AirflowException(f"Could not locate value for param:{key} at endpoint: {endpoint}")
        return v

    def construct_job_params(self, hook: Any) -> Dict[Any, Optional[Any]]:
        """
        Creates job_params dict for api call to
        launch a Plexus job.

        Some parameters required to launch a job
        are not available to the user in the Plexus
        UI. For example, an app id is required, but
        only the app name is provided in the UI.
        This function acts as a backend lookup
        of the required param value using the
        user-provided value.

        :param hook: plexus hook object
        :type hook: airflow hook
        """
        missing_params = self.required_params - set(self.job_params)
        if len(missing_params) > 0:
            raise AirflowException(f"Missing the following required job_params: {', '.join(missing_params)}")
        params = {}
        for prm in self.job_params:
            if prm in self.lookups:
                v = self._api_lookup(param=prm, hook=hook)
                params[prm] = v
            else:
                params[prm] = self.job_params[prm]
        return params
| apache-2.0 |
rlustin/letsencrypt | letsencrypt-nginx/letsencrypt_nginx/nginxparser.py | 19 | 4201 | """Very low-level nginx config parser based on pyparsing."""
import string
from pyparsing import (
Literal, White, Word, alphanums, CharsNotIn, Forward, Group,
Optional, OneOrMore, Regex, ZeroOrMore)
from pyparsing import stringEnd
from pyparsing import restOfLine
class RawNginxParser(object):
    # pylint: disable=expression-not-assigned
    """A class that parses nginx configuration with pyparsing."""
    # constants
    left_bracket = Literal("{").suppress()
    right_bracket = Literal("}").suppress()
    semicolon = Literal(";").suppress()
    space = White().suppress()
    key = Word(alphanums + "_/")
    # Matches anything that is not a special character AND any chars in single
    # or double quotes
    value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
    # a location URI: any run of characters that is not a delimiter or whitespace
    location = CharsNotIn("{};," + string.whitespace)
    # modifier for location uri [ = | ~ | ~* | ^~ ]
    # NOTE: "~*" is tried before "~" so the longer literal matches first
    modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
    # rules
    comment = Literal('#') + restOfLine()
    # a "key value;" directive; the value is optional (e.g. "ip_hash;")
    assignment = (key + Optional(space + value, default=None) + semicolon)
    location_statement = Optional(space + modifier) + Optional(space + location)
    if_statement = Literal("if") + space + Regex(r"\(.+\)") + space
    # blocks may nest, so the grammar is declared recursively via Forward()
    block = Forward()
    block << Group(
        (Group(key + location_statement) ^ Group(if_statement)) +
        left_bracket +
        Group(ZeroOrMore(Group(comment | assignment) | block)) +
        right_bracket)
    # a whole config file: one or more directives/comments/blocks, then EOF
    script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd
    def __init__(self, source):
        """Store the raw configuration text to parse."""
        self.source = source
    def parse(self):
        """Returns the parsed tree."""
        return self.script.parseString(self.source)
    def as_list(self):
        """Returns the parsed tree as a list."""
        return self.parse().asList()
class RawNginxDumper(object):
    # pylint: disable=too-few-public-methods
    """Serializes a parsed nginx configuration tree back into config text."""
    def __init__(self, blocks, indentation=4):
        self.blocks = blocks
        self.indentation = indentation
    def __iter__(self, blocks=None, current_indent=0, spacer=' '):
        """Yield the dumped nginx configuration line by line."""
        if not blocks:
            blocks = self.blocks
        prefix = spacer * current_indent
        for key, values in blocks:
            if isinstance(key, list):
                # Nested block: "<key tokens> { ... }"; separate nested
                # blocks from their siblings with a blank line.
                if current_indent:
                    yield ''
                yield prefix + spacer.join(key) + ' {'
                for entry in values:
                    yield from self.__iter__([entry],
                                             current_indent + self.indentation)
                yield prefix + '}'
            elif key == '#':
                # Comment entries keep their text verbatim after the '#'.
                yield prefix + key + values
            elif values is None:
                # Valueless directive, e.g. "ip_hash;"
                yield prefix + key + ';'
            else:
                yield prefix + key + spacer + values + ';'
    def as_string(self):
        """Return the parsed block as a string."""
        return '\n'.join(self) + '\n'
# Shortcut functions to respect Python's serialization interface
# (like pyyaml, picker or json)
def loads(source):
    """Parses nginx configuration from a string.

    :param str source: The string to parse
    :returns: The parsed tree
    :rtype: list
    """
    return RawNginxParser(source).as_list()
def load(_file):
    """Parses nginx configuration from a file.

    :param file _file: The file to parse
    :returns: The parsed tree
    :rtype: list
    """
    return loads(_file.read())
def dumps(blocks, indentation=4):
    """Dump a parsed tree to a string.

    :param list blocks: The parsed tree
    :param int indentation: The number of spaces to indent
    :rtype: str
    """
    return RawNginxDumper(blocks, indentation).as_string()
def dump(blocks, _file, indentation=4):
    """Dump a parsed tree to a file.

    :param list blocks: The parsed tree
    :param file _file: The file to dump to
    :param int indentation: The number of spaces to indent
    :rtype: NoneType
    """
    return _file.write(dumps(blocks, indentation))
| apache-2.0 |
cmu-relab/lrsl-data | src/lrsl/data.py | 1 | 6260 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import os
import sys
import string
import unicodedata
class StateLaw:
    """A single state-law document loaded from disk.

    On construction, the raw file text is normalized: look-alike Unicode
    punctuation (fancy quotes, dashes, exotic space characters) is mapped
    onto plain ASCII equivalents via ``_replace_chars`` so that downstream
    processing only has to deal with ``_allowed_chars``.
    """
    # Characters expected to remain after normalization.
    _allowed_chars = set( ',()[]";:.-$§ \n' + "'"
    + string.ascii_letters
    + string.digits
    #+ string.punctuation
    )
    # Translation table: annoying Unicode look-alikes -> ASCII replacements.
    _replace_chars = {
    # Fancy quotes are annoyances
    #'“': '""',
    #'”': '""',
    #'’': "'",
    # And so are dashes
    #'—': '-',
    #'–': '-',
    #'‑': '-',
    # Manual is error prone - use Unicode Character Classes
    # Unicode initial and final punctuation classes
    # See: https://www.compart.com/en/unicode/category/Pi
    '\u2018': "'", # Left Single Quotation Mark
    '\u201B': "'", # Single High-reversed-9 Quotation Mark
    '\u201C': '"', # Left Double Quotation Mark
    '\u201F': '"', # Double High-reversed-9 Quotation Mark
    # See: https://www.compart.com/en/unicode/category/Pf
    '\u2019': "''", # Right Single Quotation Mark
    '\u201D': "''", # Right Double Quotation Mark
    # Unicode dash character class, as per:
    # https://www.compart.com/en/unicode/category/Pd
    '\u002D': '-', # Hyphen-minus
    '\u058A': '-', # Armenian Hyphen
    '\u05BE': '-', # Hebrew Punctuation Maqaf
    '\u1400': '-', # Canadian Syllabics Hyphen
    '\u1806': '-', # Mongolian Todo Soft Hyphen
    '\u2010': '-', # Hyphen
    '\u2011': '-', # Non-breaking Hyphen
    '\u2012': '-', # Figure Dash
    '\u2013': '-', # En Dash
    '\u2014': '-', # Em Dash
    '\u2015': '-', # Horizontal Bar
    '\u2E17': '-', # Double Oblique Hyphen
    '\u2E1A': '-', # Hyphen With Diaeresis
    '\u2E3A': '-', # Two-em Dash
    '\u2E3B': '-', # Three-em Dash
    '\u2E40': '-', # Double Hyphen
    '\u301C': '-', # Wave Dash
    '\u3030': '-', # Wavy Dash
    '\u30A0': '-', # Katakana-hiragana Double Hyphen
    '\uFE31': '-', # Presentation Form For Vertical Em Dash
    '\uFE32': '-', # Presentation Form For Vertical En Dash
    '\uFE58': '-', # Small Em Dash
    '\uFE63': '-', # Small Hyphen-minus
    '\uFF0D': '-', # Fullwidth Hyphen-minus
    # spaces are annoyances
    # https://www.cs.tut.fi/~jkorpela/chars/spaces.html
    '\u0020': ' ', # <Space> (SP)
    '\u00A0': ' ', # <No-break space> (NBSP)
    '\u1680': ' ', # Ogham Space Mark
    '\u180E': ' ', # Mongolian Vowel Separator No Width
    '\u2000': ' ', # En Quad (1 en = 1/2 em)
    '\u2001': ' ', # Em Quad (1 em = normally, the height of the font)
    '\u2002': ' ', # En Space (1 en = 1/2 em)
    '\u2003': ' ', # Em Space (1 em)
    '\u2004': ' ', # Three-per-em Space 1/3 em
    '\u2005': ' ', # Four-per-em Space 1/4 em
    '\u2006': ' ', # Six-per-em Space 1/6 em
    '\u2007': ' ', # Figure Space ("Tabular width" = the width of digits)
    '\u2008': ' ', # Punctuation Space ("The width of a period '.'")
    '\u2009': ' ', # Thin Space (1/5 em or sometimes 1/6 em)
    '\u200A': ' ', # Hair Space (Narrorwer than THIN SPACE)
    '\u200B': ' ', # Zero Width Space (Nominally no width, but may expand)
    '\u202F': ' ', # Narrow No-break Space (Narrower than NBSP or SP)
    '\u205F': ' ', # Medium Mathematical Space (4/18 em)
    '\u3000': ' ', # Ideographic (CJK characters) Space
    '\uFEFF': ' ', # Zero Width NBSP (invisible char)
    '\xa0': ' ', # non-breaking space in Latin1 (ISO 8859-1)
    #This is a line separator char, but comes in as part of whitespace
    # between Utah's PAR_NUM and PAR_TITLE annotations
    # We assume manual preprocessing of lines, and replace it with space
    '\u2028': ' ',
    # Hawaii's legislative assembly has unicode name
    # TODO: Use unidecode module: http://stackoverflow.com/a/4162694/800207
    # >>> from unidecode import unidecode
    # >>> unidecode(u'ıöüç')
    # 'iouc'
    # The module should also be able to handle fancy quotes and other
    # annoyances as well. See:
    # https://github.com/avian2/unidecode/blob/master/unidecode/x020.py
    'å': 'a',
    }
    def __init__(self, state, filename, state_laws_dir=None):
        """Load and normalize one law file.

        :param state: state identifier (also the directory name on disk)
        :param filename: the law file name within the state directory
        :param state_laws_dir: optional root directory holding the per-state
            folders; defaults to resolving relative to the current directory
        :raises FileNotFoundError: if the resolved path does not exist
        :raises ValueError: if the file is empty
        """
        self.state = state
        self.filename = filename
        if state_laws_dir:
            self.file = os.path.join(state_laws_dir, state, filename)
        else:
            self.file = os.path.join(state, filename)
        if not os.path.exists(self.file):
            raise FileNotFoundError
        self.original_string = None
        with open(self.file, 'r') as law_file:
            self.original_string = law_file.read()
        if self.original_string:
            # clear up the document by replacing annoyances defined in
            # StateLaw._replace_chars class variable.
            self.document = ''.join(StateLaw._replace_chars.get(x,x)
                    for x in self.original_string)
            # TODO: Use unidecode module maybe?
            #self.document = unicodedata.normalize('NFKD', self.original_string)
        else:
            raise ValueError('self.original_string is empty.')
        self.lines = self.document.splitlines()
        self.chars = set(self.document)
        # NOTE(review): debug output of characters that survived normalization
        # but are outside _allowed_chars — consider logging or removing.
        print(self.chars - StateLaw._allowed_chars)
        #self.spaced_words = set(self.document.split())
    def __str__(self):
        # Strips the last 4 characters — assumes a '.txt'-style extension;
        # TODO confirm all manifest filenames have 4-char extensions.
        return self.filename[:-4]
def main():
    """Load every state law listed in the manifest CSV.

    Instantiating StateLaw validates and normalizes each document; the
    instances themselves are currently discarded (normalization side
    effects / debug output are the point of this driver).
    """
    state_laws_csv = './docs/state_laws.csv'
    state_laws_dir = './docs'
    state_laws = get_state_laws(state_laws_csv)
    for state, law_filename in state_laws:
        state_law = StateLaw(state, law_filename, state_laws_dir)
        #break
    #state_laws = get_state_laws('./state_laws.csv')
    #for state, law_filename in state_laws:
    #    state_law = StateLaw(state, law_filename)
    #    #break
def get_state_laws(state_laws_filename):
    """Return the rows of the state-laws manifest CSV, header excluded.

    :param state_laws_filename: path to a CSV whose first row is a header
        and whose remaining rows are (state, law_filename) pairs
    :returns: list of row lists (empty if the file has no data rows)
    """
    # newline='' is the mode recommended by the csv module: it prevents
    # universal-newline translation from corrupting quoted fields.
    with open(state_laws_filename, 'r', newline='') as state_laws_file:
        state_laws_reader = csv.reader(state_laws_file)
        # Skip the header; tolerate a completely empty file instead of
        # letting next() raise StopIteration.
        next(state_laws_reader, None)
        return list(state_laws_reader)
if __name__ == '__main__':
main()
| mit |
rubikloud/scikit-learn | examples/decomposition/plot_pca_iris.py | 2 | 1805 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()

# Project the 4-D iris measurements onto their first three principal axes.
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

# Label each species cluster at its centroid in PCA space.
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results.
# Use the builtin float: the np.float alias was removed in NumPy 1.24.
y = np.choose(y, [1, 2, 0]).astype(float)
# 'spectral' was removed from matplotlib; its replacement is 'nipy_spectral'.
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral)

# NOTE(review): x_surf/y_surf/v0/v1 below are computed but never plotted —
# they look like leftovers from a removed plane-drawing step.
x_surf = [X[:, 0].min(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
          X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[[0]])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[[1]])
v1 /= v1[-1]

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

plt.show()
| bsd-3-clause |
prices/openocd | contrib/rpc_examples/ocd_rpc_example.py | 74 | 4291 | #!/usr/bin/env python3
"""
OpenOCD RPC example, covered by GNU GPLv3 or later
Copyright (C) 2014 Andreas Ortmann (ortmann@finf.uni-hannover.de)
Example output:
./ocd_rpc_example.py
echo says hi!
target state: halted
target halted due to debug-request, current mode: Thread
xPSR: 0x01000000 pc: 0x00000188 msp: 0x10000fd8
variable @ 0x10000000: 0x01c9c380
variable @ 0x10000000: 0xdeadc0de
memory (before): ['0xdeadc0de', '0x00000011', '0xaaaaaaaa', '0x00000023',
'0x00000042', '0x0000ffff']
memory (after): ['0x00000001', '0x00000000', '0xaaaaaaaa', '0x00000023',
'0x00000042', '0x0000ffff']
"""
import socket
import itertools
def strToHex(data):
    """Convert a hex string (or, recursively, a list of them) to int(s)."""
    if isinstance(data, list):
        return map(strToHex, data)
    return int(data, 16)
def hexify(data):
    """Format *data* as a zero-padded 8-digit hex string; '<None>' for None."""
    if data is None:
        return "<None>"
    return "0x%08x" % data
def compareData(a, b):
    """Print index and values of every position where a and b differ."""
    for num, (left, right) in enumerate(zip(a, b)):
        if left != right:
            print("difference at %d: %s != %s" % (num, hexify(left), hexify(right)))
class OpenOcd:
    """Minimal client for OpenOCD's TCL RPC interface.

    Commands are sent over a TCP socket, terminated by COMMAND_TOKEN (0x1a);
    replies are read until the same token appears. Use as a context manager
    so the connection is always closed.
    """
    COMMAND_TOKEN = '\x1a'
    def __init__(self, verbose=False):
        self.verbose = verbose
        self.tclRpcIp = "127.0.0.1"
        self.tclRpcPort = 6666
        self.bufferSize = 4096
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def __enter__(self):
        self.sock.connect((self.tclRpcIp, self.tclRpcPort))
        return self
    def __exit__(self, type, value, traceback):
        try:
            self.send("exit")
        finally:
            self.sock.close()
    def send(self, cmd):
        """Send a command string to TCL RPC. Return the result that was read."""
        data = (cmd + OpenOcd.COMMAND_TOKEN).encode("utf-8")
        if self.verbose:
            print("<- ", data)
        # sendall() guarantees the whole command is transmitted; plain send()
        # may write only part of the buffer and silently drop the rest.
        self.sock.sendall(data)
        return self._recv()
    def _recv(self):
        """Read from the stream until the token (\x1a) was received."""
        data = bytes()
        while True:
            chunk = self.sock.recv(self.bufferSize)
            data += chunk
            if bytes(OpenOcd.COMMAND_TOKEN, encoding="utf-8") in chunk:
                break
        if self.verbose:
            print("-> ", data)
        data = data.decode("utf-8").strip()
        data = data[:-1] # strip trailing \x1a
        return data
    def readVariable(self, address):
        """Read one word at *address*; return it as an int, or None on failure."""
        raw = self.send("ocd_mdw 0x%x" % address).split(": ")
        return None if (len(raw) < 2) else strToHex(raw[1])
    def readMemory(self, wordLen, address, n):
        """Read *n* words of *wordLen* bits starting at *address*."""
        self.send("array unset output") # better to clear the array before
        self.send("mem2array output %d 0x%x %d" % (wordLen, address, n))
        # TCL arrays dump as "index value index value ..."; keep the values.
        output = self.send("ocd_echo $output").split(" ")
        return [int(output[2*i+1]) for i in range(len(output)//2)]
    def writeVariable(self, address, value):
        """Write one word *value* at *address*."""
        assert value is not None
        self.send("mww 0x%x 0x%x" % (address, value))
    def writeMemory(self, wordLen, address, n, data):
        """Write *n* words of *wordLen* bits from *data* starting at *address*."""
        array = " ".join(["%d 0x%x" % (a, b) for a, b in enumerate(data)])
        # The non-ASCII array name deliberately exercises OpenOCD's
        # Unicode handling over the RPC channel.
        self.send("array unset 1986ве1т") # better to clear the array before
        self.send("array set 1986ве1т { %s }" % array)
        self.send("array2mem 1986ве1т 0x%x %s %d" % (wordLen, address, n))
if __name__ == "__main__":
    def show(*args):
        # Print with a trailing blank line to visually separate results.
        print(*args, end="\n\n")

    with OpenOcd() as ocd:
        ocd.send("reset")

        show(ocd.send("ocd_echo \"echo says hi!\"")[:-1])
        show(ocd.send("capture \"ocd_halt\"")[:-1])

        # Read the first few words at the RAM region (put starting address of RAM
        # region into 'addr')
        addr = 0x10000000

        value = ocd.readVariable(addr)
        show("variable @ %s: %s" % (hexify(addr), hexify(value)))

        ocd.writeVariable(addr, 0xdeadc0de)
        show("variable @ %s: %s" % (hexify(addr), hexify(ocd.readVariable(addr))))

        data = [1, 0, 0xaaaaaaaa, 0x23, 0x42, 0xffff]
        wordlen = 32
        n = len(data)

        read = ocd.readMemory(wordlen, addr, n)
        show("memory (before):", list(map(hexify, read)))

        ocd.writeMemory(wordlen, addr, n, data)

        read = ocd.readMemory(wordlen, addr, n)
        show("memory (after):", list(map(hexify, read)))

        # Verify the round trip: report any word that did not stick.
        compareData(read, data)

        ocd.send("resume")
| gpl-2.0 |
figaromedias/cestquoilidee | vendor/guzzle/guzzle/docs/conf.py | 469 | 3047 | import sys, os
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer

# Sphinx build configuration for the Guzzle documentation.
# Register PHP lexers so ``php`` code blocks highlight without <?php tags.
lexers['php'] = PhpLexer(startinline=True, linenos=1)
lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1)
primary_domain = 'php'
# -- General configuration -----------------------------------------------------
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Guzzle'
copyright = u'2012, Michael Dowling'
version = '3.0.0'
release = '3.0.0'
exclude_patterns = ['_build']
# -- Options for HTML output ---------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Guzzle documentation"
html_short_title = "Guzzle"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['localtoc.html', 'leftbar.html', 'searchbox.html']
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'Guzzledoc'
# -- Guzzle Sphinx theme setup ------------------------------------------------
# NOTE(review): hardcoded personal path — builds on other machines rely on
# guzzle_sphinx_theme being importable some other way; consider removing.
sys.path.insert(0, '/Users/dowling/projects/guzzle_sphinx_theme')
import guzzle_sphinx_theme
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
    "index_template": "index.html",
    "project_nav_name": "Guzzle",
    "github_user": "guzzle",
    "github_repo": "guzzle",
    "disqus_comments_shortname": "guzzle",
    "google_analytics_account": "UA-22752917-1"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Guzzle.tex', u'Guzzle Documentation',
     u'Michael Dowling', 'manual'),
]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'guzzle', u'Guzzle Documentation',
     [u'Michael Dowling'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Guzzle', u'Guzzle Documentation',
     u'Michael Dowling', 'Guzzle', 'One line description of project.',
     'Miscellaneous'),
]
ButterflyNetwork/bazel | tools/build_defs/pkg/archive_test.py | 2 | 10986 | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for archive."""
import os
import os.path
import tarfile
import unittest
from tools.build_defs.pkg import archive
from tools.build_defs.pkg import testenv
class SimpleArFileTest(unittest.TestCase):
  """Testing for SimpleArFile class."""
  def assertArFileContent(self, arfile, content):
    """Assert that arfile contains exactly the entry described by `content`.
    Args:
      arfile: the path to the AR file to test.
      content: an array describing the expected content of the AR file.
          Each entry in that list should be a dictionary where each field
          is a field to test in the corresponding SimpleArFileEntry. For
          testing the presence of a file "x", then the entry could simply
          be `{"filename": "x"}`, the missing field will be ignored.
    """
    with archive.SimpleArFile(arfile) as f:
      current = f.next()
      i = 0
      while current:
        error_msg = "Extraneous file at end of archive %s: %s" % (
            arfile,
            current.filename
            )
        self.assertTrue(i < len(content), error_msg)
        for k, v in content[i].items():
          value = getattr(current, k)
          error_msg = " ".join([
              "Value `%s` for key `%s` of file" % (value, k),
              "%s in archive %s does" % (current.filename, arfile),
              "not match expected value `%s`" % v
              ])
          self.assertEqual(value, v, error_msg)
        current = f.next()
        i += 1
      if i < len(content):
        self.fail("Missing file %s in archive %s" % (content[i], arfile))
  def testEmptyArFile(self):
    self.assertArFileContent(os.path.join(testenv.TESTDATA_PATH, "empty.ar"),
                             [])
  def assertSimpleFileContent(self, names):
    # Test fixtures are named after their members joined by "_"; each member
    # file's content is its own name.
    datafile = os.path.join(testenv.TESTDATA_PATH, "_".join(names) + ".ar")
    content = [{"filename": n,
                "size": len(n.encode("utf-8")),
                "data": n.encode("utf-8")}
               for n in names]
    self.assertArFileContent(datafile, content)
  def testAFile(self):
    self.assertSimpleFileContent(["a"])
  def testBFile(self):
    self.assertSimpleFileContent(["b"])
  def testABFile(self):
    self.assertSimpleFileContent(["ab"])
  def testA_BFile(self):
    self.assertSimpleFileContent(["a", "b"])
  def testA_ABFile(self):
    self.assertSimpleFileContent(["a", "ab"])
  def testA_B_ABFile(self):
    self.assertSimpleFileContent(["a", "b", "ab"])
class TarFileWriterTest(unittest.TestCase):
  """Testing for TarFileWriter class."""
  def assertTarFileContent(self, tar, content):
    """Assert that tarfile contains exactly the entry described by `content`.
    Args:
      tar: the path to the TAR file to test.
      content: an array describing the expected content of the TAR file.
          Each entry in that list should be a dictionary where each field
          is a field to test in the corresponding TarInfo. For
          testing the presence of a file "x", then the entry could simply
          be `{"name": "x"}`, the missing field will be ignored. To match
          the content of a file entry, use the key "data".
    """
    with tarfile.open(tar, "r:") as f:
      i = 0
      for current in f:
        error_msg = "Extraneous file at end of archive %s: %s" % (
            tar,
            current.name
            )
        self.assertTrue(i < len(content), error_msg)
        for k, v in content[i].items():
          if k == "data":
            value = f.extractfile(current).read()
          else:
            value = getattr(current, k)
          error_msg = " ".join([
              "Value `%s` for key `%s` of file" % (value, k),
              "%s in archive %s does" % (current.name, tar),
              "not match expected value `%s`" % v
              ])
          self.assertEqual(value, v, error_msg)
        i += 1
      if i < len(content):
        self.fail("Missing file %s in archive %s" % (content[i], tar))
  def setUp(self):
    # Each test writes to a private scratch tarball under TEST_TMPDIR.
    self.tempfile = os.path.join(os.environ["TEST_TMPDIR"], "test.tar")
  def tearDown(self):
    if os.path.exists(self.tempfile):
      os.remove(self.tempfile)
  def testEmptyTarFile(self):
    with archive.TarFileWriter(self.tempfile):
      pass
    self.assertTarFileContent(self.tempfile, [])
  def assertSimpleFileContent(self, names):
    # Write each name as a file whose content is the name itself, then check
    # the archive (which always starts with the "." root entry).
    with archive.TarFileWriter(self.tempfile) as f:
      for n in names:
        f.add_file(n, content=n)
    content = ([{"name": "."}] +
               [{"name": n,
                 "size": len(n.encode("utf-8")),
                 "data": n.encode("utf-8")}
                for n in names])
    self.assertTarFileContent(self.tempfile, content)
  def testAddFile(self):
    self.assertSimpleFileContent(["./a"])
    self.assertSimpleFileContent(["./b"])
    self.assertSimpleFileContent(["./ab"])
    self.assertSimpleFileContent(["./a", "./b"])
    self.assertSimpleFileContent(["./a", "./ab"])
    self.assertSimpleFileContent(["./a", "./b", "./ab"])
  def testDottedFiles(self):
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_file("a")
      f.add_file("/b")
      f.add_file("./c")
      f.add_file("./.d")
      f.add_file("..e")
      f.add_file(".f")
    content = [
        {"name": "."}, {"name": "./a"}, {"name": "/b"}, {"name": "./c"},
        {"name": "./.d"}, {"name": "./..e"}, {"name": "./.f"}
    ]
    self.assertTarFileContent(self.tempfile, content)
  def testAddDir(self):
    # For some strange reason, ending slash is stripped by the test
    content = [
        {"name": ".", "mode": 0o755},
        {"name": "./a", "mode": 0o755},
        {"name": "./a/b", "data": b"ab", "mode": 0o644},
        {"name": "./a/c", "mode": 0o755},
        {"name": "./a/c/d", "data": b"acd", "mode": 0o644},
        ]
    tempdir = os.path.join(os.environ["TEST_TMPDIR"], "test_dir")
    # Iterate over the `content` array to create the directory
    # structure it describes.
    for c in content:
      if "data" in c:
        p = os.path.join(tempdir, c["name"][2:])
        os.makedirs(os.path.dirname(p))
        with open(p, "wb") as f:
          f.write(c["data"])
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_dir("./", tempdir, mode=0o644)
    self.assertTarFileContent(self.tempfile, content)
  def testMergeTar(self):
    # Merging must work identically across all supported compression formats.
    content = [
        {"name": "./a", "data": b"a"},
        {"name": "./ab", "data": b"ab"},
        ]
    for ext in ["", ".gz", ".bz2", ".xz"]:
      with archive.TarFileWriter(self.tempfile) as f:
        f.add_tar(os.path.join(testenv.TESTDATA_PATH, "tar_test.tar" + ext),
                  name_filter=lambda n: n != "./b")
      self.assertTarFileContent(self.tempfile, content)
  def testMergeTarRelocated(self):
    # Merging with root="/foo" must re-root every merged entry under ./foo.
    content = [
        {"name": ".", "mode": 0o755},
        {"name": "./foo", "mode": 0o755},
        {"name": "./foo/a", "data": b"a"},
        {"name": "./foo/ab", "data": b"ab"},
        ]
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_tar(os.path.join(testenv.TESTDATA_PATH, "tar_test.tar"),
                name_filter=lambda n: n != "./b", root="/foo")
    self.assertTarFileContent(self.tempfile, content)
  def testAddingDirectoriesForFile(self):
    # Parent directories of an added file must be created implicitly.
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_file("d/f")
    content = [
        {"name": ".",
         "mode": 0o755},
        {"name": "./d",
         "mode": 0o755},
        {"name": "./d/f"},
        ]
    self.assertTarFileContent(self.tempfile, content)
  def testAddingDirectoriesForFileSeparately(self):
    # Directories already added via add_dir must not be duplicated when a
    # file beneath them is added afterwards.
    d_dir = os.path.join(os.environ["TEST_TMPDIR"], "d_dir")
    os.makedirs(d_dir)
    with open(os.path.join(d_dir, "dir_file"), "w"):
      pass
    a_dir = os.path.join(os.environ["TEST_TMPDIR"], "a_dir")
    os.makedirs(a_dir)
    with open(os.path.join(a_dir, "dir_file"), "w"):
      pass
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_dir("d", d_dir)
      f.add_file("d/f")
      f.add_dir("a", a_dir)
      f.add_file("a/b/f")
    content = [
        {"name": ".",
         "mode": 0o755},
        {"name": "./d",
         "mode": 0o755},
        {"name": "./d/dir_file"},
        {"name": "./d/f"},
        {"name": "./a",
         "mode": 0o755},
        {"name": "./a/dir_file"},
        {"name": "./a/b",
         "mode": 0o755},
        {"name": "./a/b/f"},
        ]
    self.assertTarFileContent(self.tempfile, content)
  def testAddingDirectoriesForFileManually(self):
    # Explicitly re-adding the same directory entries must be idempotent.
    with archive.TarFileWriter(self.tempfile) as f:
      f.add_file("d", tarfile.DIRTYPE)
      f.add_file("d/f")
      f.add_file("a", tarfile.DIRTYPE)
      f.add_file("a/b", tarfile.DIRTYPE)
      f.add_file("a/b", tarfile.DIRTYPE)
      f.add_file("a/b/", tarfile.DIRTYPE)
      f.add_file("a/b/c/f")
      f.add_file("x/y/f")
      f.add_file("x", tarfile.DIRTYPE)
    content = [
        {"name": ".",
         "mode": 0o755},
        {"name": "./d",
         "mode": 0o755},
        {"name": "./d/f"},
        {"name": "./a",
         "mode": 0o755},
        {"name": "./a/b",
         "mode": 0o755},
        {"name": "./a/b/c",
         "mode": 0o755},
        {"name": "./a/b/c/f"},
        {"name": "./x",
         "mode": 0o755},
        {"name": "./x/y",
         "mode": 0o755},
        {"name": "./x/y/f"},
        ]
    self.assertTarFileContent(self.tempfile, content)
  def testChangingRootDirectory(self):
    # With root_directory set, every entry is prefixed with it instead of ".".
    with archive.TarFileWriter(self.tempfile, root_directory="root") as f:
      f.add_file("d", tarfile.DIRTYPE)
      f.add_file("d/f")
      f.add_file("a", tarfile.DIRTYPE)
      f.add_file("a/b", tarfile.DIRTYPE)
      f.add_file("a/b", tarfile.DIRTYPE)
      f.add_file("a/b/", tarfile.DIRTYPE)
      f.add_file("a/b/c/f")
      f.add_file("x/y/f")
      f.add_file("x", tarfile.DIRTYPE)
    content = [
        {"name": "root",
         "mode": 0o755},
        {"name": "root/d",
         "mode": 0o755},
        {"name": "root/d/f"},
        {"name": "root/a",
         "mode": 0o755},
        {"name": "root/a/b",
         "mode": 0o755},
        {"name": "root/a/b/c",
         "mode": 0o755},
        {"name": "root/a/b/c/f"},
        {"name": "root/x",
         "mode": 0o755},
        {"name": "root/x/y",
         "mode": 0o755},
        {"name": "root/x/y/f"},
        ]
    self.assertTarFileContent(self.tempfile, content)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
40223137/w17w17 | static/Brython3.1.3-20150514-095342/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Return a suite with one test that re-raises the current import error.

    Must be called from inside the failing ``except`` block so that
    traceback.format_exc() captures the import failure.
    """
    message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
    return _make_failed_test('ModuleImportFailure', name, ImportError(message),
                             suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    """Return a suite with one test that re-raises a load_tests() failure."""
    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Synthesize a TestCase subclass whose single test raises *exception*.

    This lets import/load errors surface as ordinary test failures instead
    of aborting the whole run.
    """
    def testFailure(self):
        raise exception
    attrs = {methodname: testFailure}
    TestClass = type(classname, (case.TestCase,), attrs)
    return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
if path.lower().endswith('$py.class'):
return path[:-9]
return os.path.splitext(path)[0]
class TestLoader(object):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = staticmethod(util.three_way_cmp)
suiteClass = suite.TestSuite
_top_level_dir = None
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                                " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        # Fall back to the classic single-test protocol when no test_*
        # methods are found but the class defines runTest().
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite
    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, case.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        # The load_tests protocol lets a module customize which tests it
        # contributes; any exception it raises becomes a synthetic failure.
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, None)
            except Exception as e:
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Import the longest importable dotted prefix of the name; the
            # remaining parts are resolved as attributes below.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, case.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.FunctionType) and
              isinstance(parent, type) and
              issubclass(parent, case.TestCase)):
            # Name resolved to a test method: wrap it in an instance of the
            # enclosing TestCase.
            name = parts[-1]
            inst = parent(name)
            # static methods follow a different path
            if not isinstance(getattr(inst, name), types.FunctionType):
                return self.suiteClass([inst])
        elif isinstance(obj, suite.TestSuite):
            return obj
        if callable(obj):
            # Last resort: call the object and accept a suite or a case.
            test = obj()
            if isinstance(test, suite.TestSuite):
                return test
            elif isinstance(test, case.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            # A test method starts with the configured prefix and is callable.
            return attrname.startswith(prefix) and \
                callable(getattr(testCaseClass, attrname))
        testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
        if self.sortTestMethodsUsing:
            # sortTestMethodsUsing is a three-way comparator, so adapt it.
            testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
        return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them and return all
        tests found within them. Only test files that match the pattern will
        be loaded. (Using shell style pattern matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        # Remember whether the top level dir was inferred from start_dir; if
        # start_dir later turns out to be a dotted package name we must
        # recompute it (see the dotted-name branch below).
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        # start_dir may be a filesystem path or a dotted package name.
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    # Re-derive the top level from the imported package and
                    # drop the provisional sys.path entry added above.
                    self._top_level_dir = self._get_directory_containing_module(top_part)
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
def _get_directory_containing_module(self, module_name):
module = sys.modules[module_name]
full_path = os.path.abspath(module.__file__)
if os.path.basename(full_path).lower().startswith('__init__.py'):
return os.path.dirname(os.path.dirname(full_path))
else:
# here we have been given a module rather than a package - so
# all we can do is search the *same* directory the module is in
# should an exception be raised instead
return os.path.dirname(full_path)
def _get_name_from_path(self, path):
path = _jython_aware_splitext(os.path.normpath(path))
_relpath = os.path.relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    # Deliberately broad: any import-time failure (including
                    # SyntaxError) becomes a synthetic failing test instead
                    # of aborting the whole discovery run.
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    # Guard against an installed module shadowing the file
                    # we actually discovered on disk.
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = _jython_aware_splitext(os.path.realpath(mod_file))
                    fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = _jython_aware_splitext(os.path.basename(full_path))
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                # Only packages (directories with __init__.py) are visited.
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    # load_tests takes full responsibility for the package;
                    # its failure is reported as a failing test, not raised.
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception as e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Shared, ready-to-use loader instance used by the module-level helpers.
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Build a TestLoader configured with the given method-name prefix,
    cmp-style sort function and (optionally) suite class."""
    loader = TestLoader()
    loader.testMethodPrefix = prefix
    loader.sortTestMethodsUsing = sortUsing
    if suiteClass:
        loader.suiteClass = suiteClass
    return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
    """Module-level convenience wrapper around TestLoader.getTestCaseNames."""
    loader = _makeLoader(prefix, sortUsing)
    return loader.getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
              suiteClass=suite.TestSuite):
    """Module-level convenience wrapper: load every test from one TestCase
    subclass into a suite."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
                  suiteClass=suite.TestSuite):
    """Module-level convenience wrapper: load every test found in *module*."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromModule(module)
| gpl-3.0 |
CoderSherlock/Mondroid | kernelsource/msm/scripts/build-all.py | 704 | 14699 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'  # default root directory for all target builds
make_command = ["vmlinux", "modules", "dtbs"]  # default make targets; main() may replace
all_options = {}  # replaced by the parsed OptionParser options in main()
compile64 = os.environ.get('CROSS_COMPILE64')  # 64-bit toolchain prefix, if configured
def error(msg):
    """Write *msg* to stderr with an 'error: ' prefix (does not exit)."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Report *msg* as an error and terminate the process with status 1."""
    error(msg)
    raise SystemExit(1)
# Abort at import time if the (32-bit) cross-compiler prefix is missing.
if not os.environ.get('CROSS_COMPILE'):
    fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
    """Abort unless the current directory looks like an MSM kernel tree."""
    markers = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    for marker in markers:
        if not os.path.isfile(marker):
            fail("This doesn't seem to be an MSM kernel dir")
            return
def check_build():
    """Ensure that the build directory is present.

    Creates build_dir when missing; a concurrent creation of the same
    directory by another process is tolerated, every other OSError is
    re-raised.
    """
    # BUG FIX: errno was referenced below but never imported anywhere in
    # this file, so the EEXIST race path raised NameError instead of being
    # handled.  Import it locally to keep the fix self-contained.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Lost the race with another process creating the directory.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
def build_threads():
    """Number of concurrent build jobs requested by the user.

    --load-average wins over -j; the default is a single job.
    """
    if all_options.load_average:
        return all_options.load_average
    if all_options.jobs:
        return all_options.jobs
    return 1
failed_targets = []  # NOTE(review): appears unused in this file — confirm before removing
# BuildResult.status carries the failing target's short name (None on
# success); messages is the list of captured log lines.
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
    """One target's ordered build steps plus the log file they write to."""
    def set_width(self, width):
        # Column width used to align the per-target prefix on log lines.
        self.width = width
    def __enter__(self):
        # NOTE: intentionally returns None; callers use "with seq:" (no "as").
        self.log = open(self.log_name, 'w')
    def __exit__(self, type, value, traceback):
        self.log.close()
    def run(self):
        # After run(), self.status is a BuildResult whose .status field is
        # this target's short name on failure, or None when all steps passed.
        self.status = None
        messages = ["Building: " + self.short_name]
        def printer(line):
            # Capture each line both in memory and in the log file.
            text = "[%-*s] %s" % (self.width, self.short_name, line)
            messages.append(text)
            self.log.write(text)
            self.log.write('\n')
        for step in self.steps:
            st = step.run(printer)
            if st:
                # Any truthy step result aborts the rest of the sequence.
                self.status = BuildResult(self.short_name, messages)
                break
        if not self.status:
            self.status = BuildResult(None, messages)
class BuildTracker:
    """Manages all of the steps necessary to perform a build. The
    build consists of one or more sequences of steps. The different
    sequences can be processed independently, while the steps within a
    sequence must be done in order.

    Concurrency is bounded with a token queue: a worker thread must take
    a token before running its sequence and returns it afterwards.
    (Python 2 module: print statements and the Queue module below.)
    """
    def __init__(self):
        self.sequence = []
        self.lock = threading.Lock()
    def add_sequence(self, log_name, short_name, steps):
        self.sequence.append(BuildSequence(log_name, short_name, steps))
    def longest_name(self):
        # Longest short name; used to align the per-target log prefixes.
        longest = 0
        for seq in self.sequence:
            longest = max(longest, len(seq.short_name))
        return longest
    def __repr__(self):
        return "BuildTracker(%s)" % self.sequence
    def run_child(self, seq):
        # Worker thread body: acquire a token, run the sequence, publish
        # its BuildResult, then return the token for the next worker.
        seq.set_width(self.longest)
        tok = self.build_tokens.get()
        with self.lock:
            print "Building:", seq.short_name
        with seq:
            seq.run()
        self.results.put(seq.status)
        self.build_tokens.put(tok)
    def run(self):
        self.longest = self.longest_name()
        self.results = Queue.Queue()
        children = []
        errors = []
        self.build_tokens = Queue.Queue()
        nthreads = build_threads()
        print "Building with", nthreads, "threads"
        # Seed one token per allowed concurrent build.
        for i in range(nthreads):
            self.build_tokens.put(True)
        for seq in self.sequence:
            child = threading.Thread(target=self.run_child, args=[seq])
            children.append(child)
            child.start()
        # Drain one result per started child; order of completion is arbitrary.
        for child in children:
            stats = self.results.get()
            if all_options.verbose:
                with self.lock:
                    for line in stats.messages:
                        print line
                    sys.stdout.flush()
            if stats.status:
                errors.append(stats.status)
        for child in children:
            child.join()
        if errors:
            fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
    """A build step that only emits a fixed message through the log
    callback; it always succeeds (returns None)."""
    def __init__(self, message):
        self.message = message
    def run(self, outp):
        outp(self.message)
class MkdirStep:
    """A build step that creates a single directory (the parent must
    already exist); OSError propagates on failure."""
    def __init__(self, direc):
        self.direc = direc
    def run(self, outp):
        target = self.direc
        outp("mkdir %s" % target)
        os.mkdir(target)
class RmtreeStep:
    """A build step that recursively removes a directory tree; missing
    paths are tolerated (ignore_errors=True)."""
    def __init__(self, direc):
        self.direc = direc
    def run(self, outp):
        target = self.direc
        outp("rmtree %s" % target)
        shutil.rmtree(target, ignore_errors=True)
class CopyfileStep:
    """A build step that copies one file's contents (no metadata)."""
    def __init__(self, src, dest):
        self.src = src
        self.dest = dest
    def run(self, outp):
        source, destination = self.src, self.dest
        outp("cp %s %s" % (source, destination))
        shutil.copyfile(source, destination)
class ExecStep:
    """A build step that runs an external command, streaming its combined
    stdout/stderr through the log callback line by line."""
    def __init__(self, cmd, **kwargs):
        self.cmd = cmd
        self.kwargs = kwargs
    def run(self, outp):
        outp("exec: %s" % (" ".join(self.cmd),))
        with open('/dev/null', 'r') as devnull:
            proc = subprocess.Popen(self.cmd, stdin=devnull,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                **self.kwargs)
            stdout = proc.stdout
            while True:
                # NOTE(review): Python 2 assumption — under Python 3 this
                # readline() yields bytes and rstrip('\n') would raise.
                line = stdout.readline()
                if not line:
                    break
                line = line.rstrip('\n')
                outp(line)
            result = proc.wait()
            if result != 0:
                # Any truthy return aborts the sequence (see BuildSequence.run).
                return ('error', result)
            else:
                return None
class Builder():
    """Per-target build configuration.

    Derives ARCH and CROSS_COMPILE from the defconfig's location and
    produces the ordered step list that builds one kernel target.
    """
    def __init__(self, name, defconfig):
        self.name = name
        self.defconfig = defconfig
        self.confname = self.defconfig.split('/')[-1]
        # Determine if this is a 64-bit target based on the location
        # of the defconfig.
        self.make_env = os.environ.copy()
        if "/arm64/" in defconfig:
            if compile64:
                self.make_env['CROSS_COMPILE'] = compile64
            else:
                fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
            self.make_env['ARCH'] = 'arm64'
        else:
            self.make_env['ARCH'] = 'arm'
        self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
        self.log_name = "%s/log-%s.log" % (build_dir, self.name)
    def build(self):
        """Return the list of steps that performs this target's build."""
        steps = []
        dest_dir = os.path.join(build_dir, self.name)
        log_name = "%s/log-%s.log" % (build_dir, self.name)  # same value as self.log_name
        steps.append(PrintStep('Building %s in %s log %s' %
                     (self.name, dest_dir, log_name)))
        if not os.path.isdir(dest_dir):
            steps.append(MkdirStep(dest_dir))
        defconfig = self.defconfig
        dotconfig = '%s/.config' % dest_dir  # NOTE(review): appears unused — confirm
        savedefconfig = '%s/defconfig' % dest_dir
        staging_dir = 'install_staging'
        modi_dir = '%s' % staging_dir
        hdri_dir = '%s/usr' % staging_dir
        steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
        # First configure the tree from the named defconfig.
        steps.append(ExecStep(['make', 'O=%s' % dest_dir,
            self.confname], env=self.make_env))
        if not all_options.updateconfigs:
            # Build targets can be dependent upon the completion of
            # previous build targets, so build them one at a time.
            cmd_line = ['make',
                'INSTALL_HDR_PATH=%s' % hdri_dir,
                'INSTALL_MOD_PATH=%s' % modi_dir,
                'O=%s' % dest_dir]
            build_targets = []
            for c in make_command:
                # Entries that look like flags (-j, --foo) go on the command
                # line itself rather than being treated as make targets.
                if re.match(r'^-{1,2}\w', c):
                    cmd_line.append(c)
                else:
                    build_targets.append(c)
            for t in build_targets:
                steps.append(ExecStep(cmd_line + [t], env=self.make_env))
        # Copy the defconfig back.
        if all_options.configs or all_options.updateconfigs:
            steps.append(ExecStep(['make', 'O=%s' % dest_dir,
                'savedefconfig'], env=self.make_env))
            steps.append(CopyfileStep(savedefconfig, defconfig))
        return steps
def update_config(file, str):
    """Append one config option line to the given defconfig file.
    NOTE(review): the parameters shadow the `file` and `str` builtins."""
    print 'Updating %s with \'%s\'\n' % (file, str)
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree.

    Returns Builder objects, one per matching defconfig.  64-bit variants
    get a "-64" suffix and are only scanned when CROSS_COMPILE64 is set.
    """
    builders = []
    # Shell glob patterns (fed to glob.glob), not regexes, despite the
    # raw-string spelling.
    arm_patterns = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'mdm*_defconfig',
        r'mpq*_defconfig',
    )
    arm64_patterns = (
        r'msm*_defconfig',
    )
    for pat in arm_patterns:
        for path in glob.glob('arch/arm/configs/' + pat):
            target = os.path.basename(path)[:-len('_defconfig')]
            builders.append(Builder(target, path))
    if 'CROSS_COMPILE64' in os.environ:
        for pat in arm64_patterns:
            for path in glob.glob('arch/arm64/configs/' + pat):
                target = os.path.basename(path)[:-len('_defconfig')] + "-64"
                builders.append(Builder(target, path))
    return builders
def build_many(targets):
    """Run the build for every Builder in *targets*, bounded by -j/-l."""
    print "Building %d target(s)" % len(targets)
    # If we are requesting multiple builds, divide down the job number
    # to construct the make_command, giving it a floor of 2, so there
    # is still some parallelism.
    if all_options.jobs and all_options.jobs > 1:
        # Python 2 integer division: jobs/len(targets) truncates.
        j = max(all_options.jobs / len(targets), 2)
        make_command.append("-j" + str(j))
    tracker = BuildTracker()
    for target in targets:
        if all_options.updateconfigs:
            update_config(target.defconfig, all_options.updateconfigs)
        steps = target.build()
        tracker.add_sequence(target.log_name, target.name, steps)
    tracker.run()
def main():
    """Parse the command line, pick the target set and kick off the builds."""
    global make_command
    check_kernel()
    check_build()
    configs = scan_configs()
    usage = ("""
   %prog [options] all -- Build all targets
   %prog [options] target target ... -- List specific targets
   %prog [options] perf -- Build all perf targets
   %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    # NOTE(review): --keep-going is parsed but never consulted here — confirm.
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options
    if options.list:
        print "Available targets:"
        for target in configs:
            print " %s" % target.name
        sys.exit(0)
    # --oldconfig / -m override the default make targets.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target
    # Positional args select which scanned targets to build.
    if args == ['all']:
        build_many(configs)
    elif args == ['perf']:
        targets = []
        for t in configs:
            if "perf" in t.name:
                targets.append(t)
        build_many(targets)
    elif args == ['noperf']:
        targets = []
        for t in configs:
            if "perf" not in t.name:
                targets.append(t)
        build_many(targets)
    elif len(args) > 0:
        all_configs = {}
        for t in configs:
            all_configs[t.name] = t
        targets = []
        for t in args:
            if t not in all_configs:
                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
            targets.append(all_configs[t])
        build_many(targets)
    else:
        parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
    main()
| gpl-2.0 |
AmandaMoen/AmandaMoen | students/JasonTyler/former_mailroom.py | 1 | 5690 | #!/usr/bin/env python
"""Mailroom assignment, non-dictionary version"""
# Each sublist referred to as a 'donor record'
# with structure: [first_name, last_name, donation1, donation2...]
# This module-level list is the in-memory "database"; it is mutated in
# place by create_donor() and add_donation().
donor_list = [
    ["Jane", "Goodall", 324, 234],
    ["Bob", "Hunter", 2, 3],
    ["Margie", "Smith", 3],
    ["Tommmy", "Jarrel", 56],
    ["Ryan", "McKash", 2345, 234, 1234]
    ]
def make_choice():
    """Prompt user to choose an action"""
    # Top-level menu; recurses on invalid input, and the action handlers
    # call back into make_choice() when they finish.
    print "Make a choice"
    print "1 to Send a Thank You"
    print "2 to Create Report"
    print "q to Quit"
    user_choice = get_input()
    if user_choice == "1": send_thankyou()
    elif user_choice == "2": create_report()
    elif user_choice == "q": quit_mailroom()
    else: make_choice()
def send_thankyou():
    """Format 'Thank You' letter to send to donor"""
    # Accepts either an existing donor's full name (records a donation and
    # prints a thank-you) or a brand new "First Last" name, in which case
    # the donor is created first.
    print "Make a choice"
    print "Full name of donor to Send a Thank You"
    print "list to Show List of Donor Names"
    print "b to Return Back"
    user_choice = get_input()
    donor = get_donor_from_str(user_choice)
    donor_name = parse_str_to_donor_name(user_choice)
    if user_choice == "b": make_choice()
    elif user_choice == 'list':
        print_all_donor_names()
        send_thankyou()
    elif donor:
        add_donation(donor, get_donation())
        print_thankyou(donor)
    elif donor_name:
        donor = create_donor(donor_name)
        add_donation(donor, get_donation())
        print_thankyou(donor)
    else:
        send_thankyou()
    # Exit function with make_choice call
    make_choice()
def create_report():
    """
    Create a report of all donor donations
    columns:
    -----
    name total donated number of donations avg donation
    -----
    """
    # Build each row as a list of pre-formatted strings, then print them
    # tab-separated.
    report_list = list()
    for donor in donor_list:
        name = " ". join([donor[0], donor[1]])
        total = str(get_total_donation(donor))
        number = str(get_number_donations(donor))
        avg = str(get_average_donation(donor))
        report_list.append([name, total, number, avg])
    print "name\t\ttotal\tnumber\taverage"
    print "----------------------------------------"
    for row in report_list:
        print "\t".join(row)
    # Exit function with make_choice call
    make_choice()
def print_thankyou(donor_record):
    """Print a donor 'Thank You' to console"""
    # The last element of a donor record is the most recent donation.
    print """Dear %s,\
\nThank you for your very kind donation of $%s, it is much appreciated."""\
% (donor_record[0], donor_record[len(donor_record) -1])
def create_donor(name_tuple, donation=None):
    """Append a new donor record built from a (first, last) pair and
    return it, optionally recording an initial *donation*.

    Returns False when name_tuple does not unpack into exactly two parts.
    parse_str_to_donor_name() should be used to build name_tuple from raw
    input.
    """
    try:
        first_name, last_name = name_tuple
    except ValueError:
        # Wrong number of name parts: reject.
        return False
    record = [first_name, last_name]
    donor_list.append(record)
    if donation:
        add_donation(record, donation)
    return record
def get_donor(name):
    """Look up a donor record by (first, last) *name*.

    Returns the matching record, or None when the name is malformed or
    no donor matches.
    """
    try:
        first_name = name[0]
        last_name = name[1]
    except (TypeError, IndexError):
        return None
    for record in donor_list:
        if record[0] == first_name and record[1] == last_name:
            return record
    return None
def get_donor_from_str(string):
    """Parse *string* into a name tuple and return the matching donor
    record (or None)."""
    return get_donor(parse_str_to_donor_name(string))
def parse_str_to_donor_name(string):
    """Parse *string* into a (first_name, last_name) tuple.

    Extra whitespace around or between the names is ignored.  Returns
    None unless the string contains exactly two whitespace-separated
    parts.
    """
    # BUG FIX: the old loop tried to drop empty fields with `del element`,
    # which only unbinds the loop variable and leaves the list unchanged,
    # so doubled spaces made valid names unparseable.  str.split() with no
    # separator already discards empty fields.
    parts = string.split()
    if len(parts) == 2:
        return (parts[0], parts[1])
    return None
def get_donation():
    """Prompt for and validate a donation amount"""
    # Loops until the user enters something int()-parseable; returns an int.
    while True:
        print "Enter a donation amount"
        donation = get_input()
        try:
            donation = int(donation)
        except ValueError:
            pass
        else:
            return donation
def add_donation(donor_record, donation):
    """Append a numeric *donation* to *donor_record* in place.

    Returns True on success, False when donation is not an int or float.
    """
    if isinstance(donation, (int, float)):
        donor_record.append(donation)
        return True
    return False
def get_average_donation(donor_record):
    """Mean donation amount for *donor_record* (total / count, as float)."""
    return float(get_total_donation(donor_record)) / get_number_donations(donor_record)
def get_total_donation(donor_record):
    """Sum of all numeric entries (the donations) in *donor_record*.

    The two leading name strings are skipped by the isinstance filter.
    """
    return sum(d for d in donor_record if isinstance(d, (int, float)))
def get_number_donations(donor_record):
    """Count of donations in *donor_record*: every entry beyond the two
    leading name strings."""
    return len(donor_record) - 2
def print_all_donor_names():
    """Print list of donor names to terminal"""
    names = list()
    for i in donor_list:
        formatted_name = "%s %s" % (i[0], i[1])
        names.append(formatted_name)
    print names
    del names  # NOTE(review): redundant — locals are discarded on return
def get_input():
    """Get user input"""
    # Python 2: raw_input() returns a str without the trailing newline;
    # surrounding whitespace is stripped.
    return raw_input(">>> ").strip()
def quit_mailroom():
    """Exit out of mailroom module"""
    # exit() raises SystemExit; status 0 signals a clean shutdown.
    exit(0)
if __name__ == "__main__":
    # Start everything off with an initial call; the menu then loops via
    # recursion until the user chooses "q".
    make_choice()
| gpl-2.0 |
halvertoluke/edx-platform | lms/djangoapps/verify_student/tests/test_services.py | 33 | 8129 | """
Tests of re-verification service.
"""
import ddt
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from lms.djangoapps.verify_student.models import VerificationCheckpoint, VerificationStatus, SkippedReverification
from lms.djangoapps.verify_student.services import ReverificationService
from openedx.core.djangoapps.credit.api import get_credit_requirement_status, set_credit_requirements
from openedx.core.djangoapps.credit.models import CreditCourse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
class TestReverificationService(ModuleStoreTestCase):
    """
    Tests for the re-verification service.
    """
    def setUp(self):
        # Builds a course with a paid "verified" mode so the user can be
        # enrolled as VERIFIED, plus a final ICRV checkpoint location used
        # by every test below.
        super(TestReverificationService, self).setUp()
        self.user = UserFactory.create(username="rusty", password="test")
        self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
        self.course_id = self.course.id
        CourseModeFactory(
            mode_slug="verified",
            course_id=self.course_id,
            min_price=100,
        )
        self.course_key = CourseKey.from_string(unicode(self.course_id))
        self.item = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
        self.final_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/final_uuid'.format(
            org=self.course_id.org, course=self.course_id.course
        )
        # Enroll in a verified mode
        self.enrollment = CourseEnrollment.enroll(self.user, self.course_id, mode=CourseMode.VERIFIED)
    @ddt.data('final', 'midterm')
    def test_start_verification(self, checkpoint_name):
        """Test the 'start_verification' service method.
        Check that if a reverification checkpoint exists for a specific course
        then 'start_verification' method returns that checkpoint otherwise it
        creates that checkpoint.
        """
        reverification_service = ReverificationService()
        checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/{checkpoint}'.format(
            org=self.course_id.org, course=self.course_id.course, checkpoint=checkpoint_name
        )
        expected_url = (
            '/verify_student/reverify'
            '/{course_key}'
            '/{checkpoint_location}/'
        ).format(course_key=unicode(self.course_id), checkpoint_location=checkpoint_location)
        self.assertEqual(
            reverification_service.start_verification(unicode(self.course_id), checkpoint_location),
            expected_url
        )
    def test_get_status(self):
        """Test the verification statuses of a user for a given 'checkpoint'
        and 'course_id'.
        """
        reverification_service = ReverificationService()
        # No checkpoint yet: status is None.
        self.assertIsNone(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        )
        checkpoint_obj = VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='submitted')
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'submitted'
        )
        # The most recent status entry wins.
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='approved')
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'approved'
        )
    def test_skip_verification(self):
        """
        Test adding skip attempt of a user for a reverification checkpoint.
        """
        reverification_service = ReverificationService()
        VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(
            SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(),
            1
        )
        # now test that a user can have only one entry for a skipped
        # reverification for a course
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(
            SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(),
            1
        )
        # testing service for skipped attempt.
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'skipped'
        )
    def test_declined_verification_on_skip(self):
        """Test that status with value 'declined' is added in credit
        requirement status model when a user skip's an ICRV.
        """
        reverification_service = ReverificationService()
        checkpoint = VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        # Create credit course and set credit requirements.
        CreditCourse.objects.create(course_key=self.course_key, enabled=True)
        set_credit_requirements(
            self.course_key,
            [
                {
                    "namespace": "reverification",
                    "name": checkpoint.checkpoint_location,
                    "display_name": "Assessment 1",
                    "criteria": {},
                }
            ]
        )
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        requirement_status = get_credit_requirement_status(
            self.course_key, self.user.username, 'reverification', checkpoint.checkpoint_location
        )
        self.assertEqual(SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(), 1)
        self.assertEqual(len(requirement_status), 1)
        self.assertEqual(requirement_status[0].get('name'), checkpoint.checkpoint_location)
        self.assertEqual(requirement_status[0].get('status'), 'declined')
    def test_get_attempts(self):
        """Check verification attempts count against a user for a given
        'checkpoint' and 'course_id'.
        """
        reverification_service = ReverificationService()
        course_id = unicode(self.course_id)
        self.assertEqual(
            reverification_service.get_attempts(self.user.id, course_id, self.final_checkpoint_location),
            0
        )
        # now create a checkpoint and add user's entry against it then test
        # that the 'get_attempts' service method returns correct count
        checkpoint_obj = VerificationCheckpoint.objects.create(
            course_id=course_id,
            checkpoint_location=self.final_checkpoint_location
        )
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='submitted')
        self.assertEqual(
            reverification_service.get_attempts(self.user.id, course_id, self.final_checkpoint_location),
            1
        )
    def test_not_in_verified_track(self):
        # No longer enrolled in a verified track
        self.enrollment.update_enrollment(mode=CourseMode.HONOR)
        # Should be marked as "skipped" (opted out)
        service = ReverificationService()
        status = service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(status, service.NON_VERIFIED_TRACK)
| agpl-3.0 |
squilter/mavlink | pymavlink/tools/mavmission.py | 28 | 1639 | #!/usr/bin/env python
'''
extract mavlink mission from log
'''
import sys, time, os
from argparse import ArgumentParser
# Command-line interface: one or more log files plus the output mission file.
parser = ArgumentParser(description=__doc__)
parser.add_argument("--output", default='mission.txt', help="output file")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil, mavwp
parms = {}  # NOTE(review): appears unused in this script — confirm before removing
def mavmission(logfile):
    '''Extract the mavlink mission from *logfile* and save it to args.output.

    Accepts MISSION_ITEM/WAYPOINT messages directly and converts legacy
    CMD records into MISSION_ITEM messages; gaps in the waypoint sequence
    are padded with duplicates so indices line up.
    '''
    # BUG FIX: this previously opened the module-global `filename` instead
    # of the `logfile` parameter; it only worked because the caller happened
    # to pass that same global.
    mlog = mavutil.mavlink_connection(logfile)
    wp = mavwp.MAVWPLoader()
    while True:
        m = mlog.recv_match(type=['MISSION_ITEM','CMD','WAYPOINT'])
        if m is None:
            break
        if m.get_type() == 'CMD':
            # Convert a legacy CMD record into a MISSION_ITEM message.
            m = mavutil.mavlink.MAVLink_mission_item_message(0,
                                                             0,
                                                             m.CNum,
                                                             mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                                                             m.CId,
                                                             0, 1,
                                                             m.Prm1, m.Prm2, m.Prm3, m.Prm4,
                                                             m.Lat, m.Lng, m.Alt)
        if m.current >= 2:
            # current >= 2 presumably marks guided/non-mission items — confirm
            # against the MAVLink mission protocol before relying on this.
            continue
        # Pad any gap in the sequence with duplicate entries.
        while m.seq > wp.count():
            print("Adding dummy WP %u" % wp.count())
            wp.set(m, wp.count())
        wp.set(m, m.seq)
    wp.save(args.output)
    print("Saved %u waypoints to %s" % (wp.count(), args.output))
print("Saved %u waypoints to %s" % (wp.count(), args.output))
total = 0.0  # NOTE(review): appears unused — confirm before removing
# Process every log file listed on the command line.
for filename in args.logs:
    mavmission(filename)
| lgpl-3.0 |
oasiswork/odoo | addons/sales_team/sales_team.py | 180 | 6131 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.osv):
    # Sales team model: a hierarchical grouping of salespeople used to
    # organize CRM cases, with sparkline helpers for the kanban dashboard.
    _name = "crm.case.section"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _description = "Sales Teams"
    _order = "complete_name"
    # Number of months rendered by the sparkline bar charts.
    _period_number = 5

    def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
        # Function-field helper: complete_name is simply the name_get()
        # rendering ("Parent / Child"), keyed by record id.
        return dict(self.name_get(cr, uid, ids, context=context))

    def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
        """ Generic method to generate data for bar chart values using SparklineBarWidget.
            This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).

            :param obj: the target model (i.e. crm_lead)
            :param domain: the domain applied to the read_group
            :param list read_fields: the list of fields to read in the read_group
            :param str value_field: the field used to compute the value of the bar slice
            :param str groupby_field: the fields used to group

            :return list section_result: a list of dicts: [
                                                {   'value': (int) bar_column_value,
                                                    'tooltip': (str) bar_column_tooltip,
                                                }
                                            ]
        """
        month_begin = date.today().replace(day=1)
        # Pre-fill one zero-valued slot per month, oldest month first.
        section_result = [{
            'value': 0,
            'tooltip': tools.ustr((month_begin + relativedelta.relativedelta(months=-i)).strftime('%B %Y')),
        } for i in range(self._period_number - 1, -1, -1)]
        group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
        # date and datetime group fields are serialized with different
        # server-side formats; pick the matching strptime pattern.
        pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
        for group in group_obj:
            group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
            month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
            # Overwrite the pre-filled slot for that month with real data.
            section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
        return section_result

    _columns = {
        'name': fields.char('Sales Team', size=64, required=True, translate=True),
        'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
        'code': fields.char('Code', size=8),
        'active': fields.boolean('Active', help="If the active field is set to "\
                        "false, it will allow you to hide the sales team without removing it."),
        'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team override the salesman with the team leader."),
        'user_id': fields.many2one('res.users', 'Team Leader'),
        'member_ids': fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
        'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
        'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
        'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
        'note': fields.text('Description'),
        'working_hours': fields.float('Working Hours', digits=(16, 2)),
        'color': fields.integer('Color Index'),
    }

    _defaults = {
        'active': 1,
    }

    _sql_constraints = [
        ('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
    ]

    _constraints = [
        # Guards against a team being (indirectly) its own parent.
        (osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
    ]

    def name_get(self, cr, uid, ids, context=None):
        """Overrides orm name_get method: prefixes the parent team name
        as "Parent / Child" (one level only)."""
        if not isinstance(ids, list):
            ids = [ids]
        res = []
        if not ids:
            return res
        reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
        for record in reads:
            name = record['name']
            if record['parent_id']:
                # parent_id is a (id, display_name) pair from read().
                name = record['parent_id'][1] + ' / ' + name
            res.append((record['id'], name))
        return res
class res_partner(osv.Model):
    # Extend partners with the sales team they belong to.
    _inherit = 'res.partner'
    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
class res_users(osv.Model):
    """Extend users with a default sales team, writable by the user."""
    _inherit = 'res.users'
    _columns = {
        'default_section_id': fields.many2one('crm.case.section', 'Default Sales Team'),
    }

    def __init__(self, pool, cr):
        result = super(res_users, self).__init__(pool, cr)
        # Work on a fresh copy so the class-level list shared with the
        # parent model is never mutated in place, then allow users to
        # write their own default team.
        writeable_fields = list(self.SELF_WRITEABLE_FIELDS)
        writeable_fields.append('default_section_id')
        self.SELF_WRITEABLE_FIELDS = writeable_fields
        return result
| agpl-3.0 |
odin1314/security_monkey | security_monkey/watchers/s3.py | 8 | 8217 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.s3
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey.exceptions import S3PermissionsIssue
from security_monkey.exceptions import S3ACLReturnedNoneDisplayName
from security_monkey import app
from boto.s3.connection import OrdinaryCallingFormat
import boto
import json
def get_lifecycle_rules(bhandle):
    """Return the lifecycle rules configured on a boto bucket handle.

    :param bhandle: boto S3 bucket handle supporting get_lifecycle_config()
    :returns: list of dicts with 'id', 'status', 'prefix' and, when present,
        'transition' and 'expiration' sub-dicts; empty list if the bucket
        has no lifecycle configuration.
    """
    lifecycle = []
    try:
        lc = bhandle.get_lifecycle_config()
    except Exception:
        # boto raises an exception (404) when the bucket has no lifecycle
        # configuration; treat that as "no rules".
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        return lifecycle

    for rule in lc:
        lc_rule = {
            'id': rule.id,
            'status': rule.status,
            'prefix': rule.prefix,
        }
        if rule.transition:
            lc_rule['transition'] = {
                'days': rule.transition.days,
                'date': rule.transition.date,
                'storage_class': rule.transition.storage_class
            }
        if rule.expiration:
            lc_rule['expiration'] = {
                'days': rule.expiration.days,
                'date': rule.expiration.date
            }
        lifecycle.append(lc_rule)
    return lifecycle
class S3(Watcher):
    # Watcher that enumerates every S3 bucket in the monitored accounts and
    # captures its ACL, policy, versioning and lifecycle configuration.
    index = 's3'
    i_am_singular = 'S3 Bucket'
    i_am_plural = 'S3 Buckets'
    # boto location constants -> canonical AWS region names. '' (DEFAULT)
    # means the classic us-east-1 endpoint.
    region_mappings = dict(APNortheast='ap-northeast-1', APSoutheast='ap-southeast-1', APSoutheast2='ap-southeast-2',
                           DEFAULT='', EU='eu-west-1', SAEast='sa-east-1', USWest='us-west-1', USWest2='us-west-2')

    def __init__(self, accounts=None, debug=False):
        super(S3, self).__init__(accounts=accounts, debug=debug)

    def slurp(self):
        """
        :returns: item_list - list of S3 Buckets.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()
        item_list = []
        exception_map = {}
        from security_monkey.common.sts_connect import connect
        for account in self.accounts:
            try:
                s3conn = connect(account, 's3', calling_format=OrdinaryCallingFormat())
                all_buckets = self.wrap_aws_rate_limited_call(
                    s3conn.get_all_buckets
                )
            except Exception as e:
                # Could not even list buckets: record the failure for the
                # whole account and move on to the next one.
                exc = BotoConnectionIssue(str(e), 's3', account, None)
                self.slurp_exception((self.index, account), exc, exception_map)
                continue

            for bucket in all_buckets:
                app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, bucket.name, account))
                if self.check_ignore_list(bucket.name):
                    continue
                try:
                    # Re-connect against the bucket's own region so ACL and
                    # policy calls hit the right endpoint.
                    loc = self.wrap_aws_rate_limited_call(bucket.get_location)
                    region = self.translate_location_to_region(loc)
                    if region == '':
                        s3regionconn = self.wrap_aws_rate_limited_call(
                            connect,
                            account,
                            's3',
                            calling_format=OrdinaryCallingFormat()
                        )
                        region = 'us-east-1'
                    else:
                        s3regionconn = self.wrap_aws_rate_limited_call(
                            connect,
                            account,
                            's3',
                            region=region,
                            calling_format=OrdinaryCallingFormat()
                        )
                    # validate=False skips the HEAD request; we already know
                    # the bucket exists from get_all_buckets().
                    bhandle = self.wrap_aws_rate_limited_call(
                        s3regionconn.get_bucket,
                        bucket,
                        validate=False
                    )
                    s3regionconn.close()
                except Exception as e:
                    exc = S3PermissionsIssue(bucket.name)
                    # Unfortunately, we can't get the region, so the entire account
                    # will be skipped in find_changes, not just the bad bucket.
                    self.slurp_exception((self.index, account), exc, exception_map)
                    continue

                app.logger.debug("Slurping %s (%s) from %s/%s" % (self.i_am_singular, bucket.name, account, region))
                bucket_dict = self.conv_bucket_to_dict(bhandle, account, region, bucket.name, exception_map)
                item = S3Item(account=account, region=region, name=bucket.name, config=bucket_dict)
                item_list.append(item)

        return item_list, exception_map

    def translate_location_to_region(self, location):
        # Unknown locations are assumed to already be region names.
        if location in self.region_mappings:
            return self.region_mappings[location]
        else:
            return location

    def conv_bucket_to_dict(self, bhandle, account, region, bucket_name, exception_map):
        """
        Converts the bucket ACL and Policy information into a python dict that we can save.
        """
        bucket_dict = {}
        grantees = {}
        acl = self.wrap_aws_rate_limited_call(
            bhandle.get_acl
        )
        aclxml = self.wrap_aws_rate_limited_call(
            acl.to_xml
        )
        if '<DisplayName>None</DisplayName>' in aclxml:
            # Boto sometimes returns XML with strings like:
            #   <DisplayName>None</DisplayName>
            # Wait a little while, and it will return the real DisplayName
            # The console will display "Me" as the Grantee when we see these None
            # DisplayNames in boto.
            exc = S3ACLReturnedNoneDisplayName(bucket_name)
            self.slurp_exception((self.index, account, region, bucket_name), exc, exception_map)
        else:
            # Aggregate permissions per grantee (display name, or grant URI
            # for group grants), keeping each list sorted for stable diffs.
            for grant in acl.acl.grants:
                if grant.display_name == 'None' or grant.display_name == 'null':
                    app.logger.info("Received a bad display name: %s" % grant.display_name)
                if grant.display_name is None:
                    gname = grant.uri
                else:
                    gname = grant.display_name
                if gname in grantees:
                    grantees[gname].append(grant.permission)
                    grantees[gname] = sorted(grantees[gname])
                else:
                    grantees[gname] = [grant.permission]
            bucket_dict['grants'] = grantees

        try:
            policy = self.wrap_aws_rate_limited_call(
                bhandle.get_policy
            )
            policy = json.loads(policy)
            bucket_dict['policy'] = policy
        except boto.exception.S3ResponseError as e:
            # S3ResponseError is raised if there is no policy.
            # Simply ignore.
            pass

        # {} or {'Versioning': 'Enabled'} or {'MfaDelete': 'Disabled', 'Versioning': 'Enabled'}
        bucket_dict['versioning'] = self.wrap_aws_rate_limited_call(
            bhandle.get_versioning_status
        )
        bucket_dict['lifecycle_rules'] = get_lifecycle_rules(bhandle)
        return bucket_dict
class S3Item(ChangeItem):
    """Change-tracking item wrapping a single S3 bucket configuration."""

    def __init__(self, account=None, region=None, name=None, config=None):
        # FIX: the default used to be the mutable literal `{}`, which is
        # shared across every call; use a None sentinel and build a fresh
        # dict per instance instead. Callers passing nothing still get {}.
        if config is None:
            config = {}
        super(S3Item, self).__init__(
            index=S3.index,
            region=region,
            account=account,
            name=name,
            new_config=config)
| apache-2.0 |
#!/usr/bin/env python
import os
import sys

from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand

# Temporarily put the package directory on sys.path so __version__ can be
# imported without installing the package first, then restore the path.
# NOTE(review): the inserted path points *into* 'pirc522', yet the import is
# `from pirc522 import ...` -- presumably the parent directory resolution is
# what actually makes this work; confirm before changing.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                'pirc522')))
from pirc522 import __version__  # flake8: noqa
sys.path.pop(0)

# Declarative package metadata for PyPI; runtime deps are the SPI and GPIO
# bindings used on the Raspberry Pi.
setup(
    name='pi-rc522',
    packages=find_packages(),
    include_package_data=True,
    version=__version__,
    description='Raspberry Pi Python library for SPI RFID RC522 module.',
    long_description='Raspberry Pi Python library for SPI RFID RC522 module.',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    author='ondryaso',
    author_email='ondryaso@ondryaso.eu',
    url='https://github.com/ondryaso/pi-rc522',
    license='MIT',
    install_requires=['spidev', 'RPi.GPIO'],
)
| mit |
mscuthbert/abjad | abjad/tools/lilypondparsertools/test/test_lilypondparsertools_LilyPondParser__spanners__Slur.py | 2 | 2745 | # -*- encoding: utf-8 -*-
import pytest
from abjad import *
from abjad.tools.lilypondparsertools import LilyPondParser
def test_lilypondparsertools_LilyPondParser__spanners__Slur_01():
    r'''Successful slurs, showing single leaf overlap.
    '''

    # Two slurs sharing the middle leaf: ") (" on one note.
    target = Container(scoretools.make_notes([0] * 4, [(1, 4)]))
    slur = Slur()
    attach(slur, target[2:])
    slur = Slur()
    attach(slur, target[:3])

    assert systemtools.TestManager.compare(
        target,
        r'''
        {
            c'4 (
            c'4
            c'4 ) (
            c'4 )
        }
        '''
        )

    # Round-trip: parsing the formatted score must yield an equal but
    # distinct object tree.
    parser = LilyPondParser()
    result = parser(format(target))
    assert format(target) == format(result) and target is not result
def test_lilypondparsertools_LilyPondParser__spanners__Slur_02():
    r'''Swapped start and stop.
    '''

    target = Container(scoretools.make_notes([0] * 4, [(1, 4)]))
    slur = Slur()
    attach(slur, target[2:])
    slur = Slur()
    attach(slur, target[:3])

    assert systemtools.TestManager.compare(
        target,
        r'''
        {
            c'4 (
            c'4
            c'4 ) (
            c'4 )
        }
        '''
        )

    # The input writes "()" on the shared leaf (stop before start); the
    # parser must normalize it to the same structure as above.
    string = r"\relative c' { c ( c c () c ) }"
    parser = LilyPondParser()
    result = parser(string)
    assert format(target) == format(result) and target is not result
def test_lilypondparsertools_LilyPondParser__spanners__Slur_03():
    r'''Single leaf.
    '''

    # A slur that starts and stops on the same leaf is malformed input.
    # FIX: use pytest.raises as a context manager instead of the
    # deprecated (and since-removed) string-eval form.
    string = '{ c () c c c }'
    with pytest.raises(Exception):
        LilyPondParser()(string)
def test_lilypondparsertools_LilyPondParser__spanners__Slur_04():
    r'''Unterminated.
    '''

    # A slur opened but never closed must be rejected.
    # FIX: use pytest.raises as a context manager instead of the
    # deprecated (and since-removed) string-eval form.
    string = '{ c ( c c c }'
    with pytest.raises(Exception):
        LilyPondParser()(string)
def test_lilypondparsertools_LilyPondParser__spanners__Slur_05():
    r'''Unstarted.
    '''

    # A slur stop with no matching start must be rejected.
    # FIX: use pytest.raises as a context manager instead of the
    # deprecated (and since-removed) string-eval form.
    string = '{ c c c c ) }'
    with pytest.raises(Exception):
        LilyPondParser()(string)
def test_lilypondparsertools_LilyPondParser__spanners__Slur_06():
    r'''Nested.
    '''

    # LilyPond slurs cannot nest; the inner "( ... )" must be rejected.
    # FIX: use pytest.raises as a context manager instead of the
    # deprecated (and since-removed) string-eval form.
    string = '{ c ( c ( c ) c ) }'
    with pytest.raises(Exception):
        LilyPondParser()(string)
def test_lilypondparsertools_LilyPondParser__spanners__Slur_07():
    r'''With direction.
    '''

    # Down renders as "_ (" and Up as "^ (" before the slur start.
    target = Container(scoretools.make_notes([0] * 4, [(1, 4)]))
    slur = Slur(direction=Down)
    attach(slur, target[:3])
    slur = Slur(direction=Up)
    attach(slur, target[2:])

    assert systemtools.TestManager.compare(
        target,
        r'''
        {
            c'4 _ (
            c'4
            c'4 ) ^ (
            c'4 )
        }
        '''
        )

    # Round-trip through the parser must preserve the directions.
    parser = LilyPondParser()
    result = parser(format(target))
    assert format(target) == format(result) and target is not result
balanced/wtforms | tests/ext_csrf.py | 3 | 4399 | from unittest import TestCase
from wtforms.fields import TextField
from wtforms.ext.csrf import SecureForm
from wtforms.ext.csrf.session import SessionSecureForm
import datetime
import hashlib
import hmac
class DummyPostData(dict):
    """Minimal stand-in for a multidict-style form-data object.

    WTForms form constructors expect the posted data to expose getlist().
    """

    def getlist(self, key):
        """Return the value(s) stored under *key*; raises KeyError if absent.

        Lists and tuples pass through unchanged; any other value is wrapped
        in a one-element list so callers can always iterate.
        """
        value = self[key]
        return value if isinstance(value, (list, tuple)) else [value]
class InsecureForm(SecureForm):
    # Trivial SecureForm whose CSRF token is the csrf_context value itself,
    # letting tests control token generation deterministically.
    def generate_csrf_token(self, csrf_context):
        return csrf_context

    a = TextField()
class FakeSessionRequest(object):
    # Request-like object exposing only the .session attribute that
    # SessionSecureForm reads from its csrf_context.
    def __init__(self, session):
        self.session = session
class StupidObject(object):
    # Bare target for Form.populate_obj(): the test checks that the form
    # writes 'a' but leaves 'csrf_token' untouched.
    a = None
    csrf_token = None
class SecureFormTest(TestCase):
    # Exercises the base SecureForm contract via the InsecureForm stub.

    def test_base_class(self):
        # SecureForm itself leaves generate_csrf_token() unimplemented.
        self.assertRaises(NotImplementedError, SecureForm)

    def test_basic_impl(self):
        form = InsecureForm(csrf_context=42)
        self.assertEqual(form.csrf_token.current_token, 42)
        # No form data was posted, so validation must fail on the token.
        self.assert_(not form.validate())
        self.assertEqual(len(form.csrf_token.errors), 1)
        self.assertEqual(form.csrf_token._value(), 42)
        # Make sure csrf_token is taken out from .data
        self.assertEqual(form.data, {'a': None})

    def test_with_data(self):
        post_data = DummyPostData(csrf_token=u'test', a=u'hi')
        form = InsecureForm(post_data, csrf_context=u'test')
        self.assert_(form.validate())
        self.assertEqual(form.data, {'a': u'hi'})

        form = InsecureForm(post_data, csrf_context=u'something')
        self.assert_(not form.validate())

        # Make sure that value is still the current token despite
        # the posting of a different value
        self.assertEqual(form.csrf_token._value(), u'something')

        # Make sure populate_obj doesn't overwrite the token
        obj = StupidObject()
        form.populate_obj(obj)
        self.assertEqual(obj.a, u'hi')
        self.assertEqual(obj.csrf_token, None)

    def test_with_missing_token(self):
        # Posting without a csrf_token field must fail validation while the
        # rendered value falls back to the freshly generated token.
        post_data = DummyPostData(a='hi')
        form = InsecureForm(post_data, csrf_context=u'test')
        self.assert_(not form.validate())
        self.assertEqual(form.csrf_token.data, u'')
        self.assertEqual(form.csrf_token._value(), u'test')
class SessionSecureFormTest(TestCase):
    # Exercises SessionSecureForm's HMAC-based, optionally time-limited
    # CSRF tokens against a plain dict acting as the session.

    class SSF(SessionSecureForm):
        SECRET_KEY = 'abcdefghijklmnop'.encode('ascii')

    class BadTimeSSF(SessionSecureForm):
        # Negative time limit: every token it produces is already expired.
        SECRET_KEY = 'abcdefghijklmnop'.encode('ascii')
        TIME_LIMIT = datetime.timedelta(-1, 86300)

    class NoTimeSSF(SessionSecureForm):
        # No expiry: tokens are plain "##<hmac>" with an empty timestamp.
        SECRET_KEY = 'abcdefghijklmnop'.encode('ascii')
        TIME_LIMIT = None

    def test_basic(self):
        # SessionSecureForm needs a SECRET_KEY, and SSF needs a csrf_context.
        self.assertRaises(Exception, SessionSecureForm)
        self.assertRaises(TypeError, self.SSF)
        session = {}
        # Instantiating the form must seed the session's 'csrf' key.
        form = self.SSF(csrf_context=FakeSessionRequest(session))
        assert 'csrf' in session

    def test_timestamped(self):
        session = {}
        postdata = DummyPostData(csrf_token=u'fake##fake')
        form = self.SSF(postdata, csrf_context=session)
        assert 'csrf' in session
        assert form.csrf_token._value()
        # The rendered value is timestamp##hmac, never the raw session key.
        assert form.csrf_token._value() != session['csrf']
        assert not form.validate()
        self.assertEqual(form.csrf_token.errors[0], u'CSRF failed')
        good_token = form.csrf_token._value()

        # Now test a valid CSRF with invalid timestamp
        evil_form = self.BadTimeSSF(csrf_context=session)
        bad_token = evil_form.csrf_token._value()

        postdata = DummyPostData(csrf_token=bad_token)
        form = self.SSF(postdata, csrf_context=session)
        assert not form.validate()
        self.assertEqual(form.csrf_token.errors[0], u'CSRF token expired')

    def test_notime(self):
        session = {}
        form = self.NoTimeSSF(csrf_context=session)
        # With TIME_LIMIT = None the token is '##' + HMAC-SHA1(session csrf).
        hmacced = hmac.new(form.SECRET_KEY, session['csrf'].encode('utf8'), digestmod=hashlib.sha1)
        self.assertEqual(form.csrf_token._value(), '##%s' % hmacced.hexdigest())
        assert not form.validate()
        self.assertEqual(form.csrf_token.errors[0], u'CSRF token missing')

        # Test with pre-made values
        session = {'csrf': u'00e9fa5fe507251ac5f32b1608e9282f75156a05'}
        postdata = DummyPostData(csrf_token=u'##d21f54b7dd2041fab5f8d644d4d3690c77beeb14')
        form = self.NoTimeSSF(postdata, csrf_context=session)
        assert form.validate()
| bsd-3-clause |
OpenUniversity/ovirt-engine | packaging/setup/plugins/ovirt-engine-remove/base/files/simple.py | 1 | 15307 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple plugin."""
import gettext
import glob
import hashlib
import os
from otopi import constants as otopicons
from otopi import filetransaction, plugin, util
import configparser
from ovirt_engine_setup import constants as osetupcons
from ovirt_setup_lib import dialog
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Simple plugin."""
    def _digestFile(self, filename):
        """Return the hex MD5 digest of *filename*'s content.

        Used only to detect whether a tracked file changed since setup,
        not for any security purpose.
        """
        md5 = hashlib.new('md5')
        # Read file in chunks of 10KB
        with open(filename, 'rb') as f:
            while True:
                data = f.read(10240)
                if not data:
                    break
                md5.update(data)
        return md5.hexdigest()
    def _safeDelete(self, filename):
        """Delete *filename*, logging instead of raising on failure.

        Removal is best-effort: a file we cannot delete must not abort the
        whole uninstall run.
        """
        try:
            os.unlink(filename)
        except OSError as e:
            self.logger.debug(
                "Cannot delete '%s'",
                filename,
                exc_info=True,
            )
            self.logger.error(
                _("Cannot delete '{file}': {error}").format(
                    file=filename,
                    error=e,
                )
            )
    def _revertChanges(self, filename, changes):
        """Undo recorded line-level edits in *filename*.

        Each entry in *changes* may have an 'added' line (to drop), a
        'removed' line (to restore), or both (replace 'added' back with
        'removed'). The rewritten content is queued as a FileTransaction
        rather than written directly.
        """
        new_content = []
        with open(filename, 'r') as f:
            old_content = f.read().splitlines()
        just_remove = []
        just_add = []
        replace = {}
        for c in changes:
            if 'removed' not in c:
                just_remove.append(c['added'])
            elif 'added' not in c:
                just_add.append(c['removed'])
            else:
                replace[c['added']] = c['removed']
        # For checking if remove/replace lines were found, we work on copies,
        # because there might be duplicate lines in the file.
        remove_unremoved = just_remove[:]
        replace_unremoved = replace.copy()
        for line in old_content:
            if line in just_remove:
                # Drop the line; tick it off the "still expected" copy.
                if line in remove_unremoved:
                    remove_unremoved.remove(line)
            else:
                # should be updated or added
                if line in replace:
                    orig_line = line
                    line = replace[line]
                    if orig_line in replace_unremoved:
                        del replace_unremoved[orig_line]
                new_content.append(line)
        # Lines that were deleted during setup are re-appended at the end.
        new_content.extend(just_add)
        if remove_unremoved or replace_unremoved:
            # Some recorded edits were not found in the current file
            # (probably edited since setup); warn and log the leftovers.
            self.logger.warning(
                _(
                    'Some changes to {file} could not be reverted. More '
                    'details can be found in the log.'
                ).format(
                    file=filename,
                )
            )
            if remove_unremoved:
                self.logger.debug(
                    (
                        'The following lines were not found in {file} and so '
                        'were not removed:\n{lines}'
                    ).format(
                        file=filename,
                        lines='\n'.join(
                            [
                                '\t{line}'.format(line=newline)
                                for newline in remove_unremoved
                            ]
                        ),
                    )
                )
            if replace_unremoved:
                self.logger.debug(
                    (
                        'The following lines were not found in {file} and so '
                        'were not reverted to their old content:\n{lines}'
                    ).format(
                        file=filename,
                        lines='\n'.join(
                            [
                                '\tnew:\t{new}\n\told:\t{old}\n'.format(
                                    new=new,
                                    old=old,
                                )
                                for new, old in replace_unremoved.items()
                            ]
                        ),
                    )
                )
        if new_content != old_content:
            # Only queue a write when the revert actually changed something.
            self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
                filetransaction.FileTransaction(
                    name=filename,
                    content=new_content
                )
            )
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Populated from the uninstall *.conf files during _init /
        # _customization:
        self._infos = None         # sorted list of uninstall info files
        self._files = {}           # file path -> recorded md5
        self._toremove = None      # set of file paths selected for removal
        self._lines = {}           # file path -> recorded line-level changes
        self._descriptions = {}    # group name -> human-readable description
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed the environment keys this plugin consumes with defaults so
        # later stages can read them unconditionally.
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_GROUPS,
            ''
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.ASK_GROUPS,
            True
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.FILES_TO_REMOVE,
            []
        )
        # TODO: check if we need to allow to override this by answer file.
        # Using a list here won't allow you to override this
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST,
            []
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_CHANGED,
            None
        )
        # Uninstall metadata files written by setup, in stable order.
        self._infos = sorted(
            glob.glob(
                os.path.join(
                    osetupcons.FileLocations.OVIRT_SETUP_UNINSTALL_DIR,
                    '*.conf',
                )
            )
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        name=osetupcons.Stages.REMOVE_CUSTOMIZATION_GROUPS,
        after=(
            osetupcons.Stages.REMOVE_CUSTOMIZATION_COMMON,
        ),
    )
    def _customization(self):
        """Select which uninstall file groups to remove.

        Parses every uninstall *.conf, resolves group descriptions,
        optionally asks the user per group, then computes the final sets of
        files to delete and line changes to revert.
        """
        interactive = self.environment[
            osetupcons.RemoveEnv.ASK_GROUPS
        ]
        unremovable = {}
        already_asked = []
        for info in self._infos:
            config = configparser.ConfigParser()
            # Keep option names case-sensitive (default lower-cases them).
            config.optionxform = str
            config.read([info])
            for section in config.sections():
                if section.startswith(
                    osetupcons.Const.FILE_GROUP_SECTION_PREFIX
                ):
                    group = section[
                        len(osetupcons.Const.FILE_GROUP_SECTION_PREFIX):
                    ]
                    description = config.get(section, 'description')
                    # Prefer a translated "<group>.description" message;
                    # fall back to the raw description from the file.
                    template = "%s.description" % group
                    msg = gettext.dgettext(
                        message=template,
                        domain='ovirt-engine-setup'
                    )
                    if msg == template:
                        msg = description
                    self._descriptions[group] = msg
                    add_group = self.environment[
                        osetupcons.RemoveEnv.REMOVE_ALL
                    ]
                    if not add_group:
                        if group in self.environment[
                            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST
                        ]:
                            add_group = True
                    if (
                        not add_group and
                        interactive and
                        group not in already_asked
                    ):
                        # Ask the user at most once per group.
                        if group not in self.environment[
                            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST
                        ]:
                            already_asked.append(group)
                            add_group = dialog.queryBoolean(
                                dialog=self.dialog,
                                name='OVESETUP_REMOVE_GROUP/' + group,
                                note=_(
                                    'Do you want to remove {description}? '
                                    '(@VALUES@) [@DEFAULT@]: '
                                ).format(
                                    description=msg,
                                ),
                                prompt=True,
                                true=_('Yes'),
                                false=_('No'),
                                default=False,
                            )
                    if add_group:
                        self.environment[
                            osetupcons.RemoveEnv.
                            REMOVE_GROUPS
                        ] += ',' + group

            def getFiles(section):
                # Collect 'file.<idx>.<attr>' options of *section* into
                # {file name: recorded md5}.
                files = {}
                for name, value in config.items(section):
                    comps = name.split('.')
                    if comps[0] == 'file':
                        files.setdefault(comps[1], {})[comps[2]] = value
                # python 2.6 doesn't support dict comprehensions.
                # TODO: we may move to it when minimal python version
                # available is 2.7+
                return dict((f['name'], f['md5']) for f in files.values())

            def getLines(section):
                # Collect recorded line-level edits of *section* into
                # {file name: [ {added|removed: line}, ... ]}.
                associated_lines = {}
                aggregated_lines = {}
                # line.{file_index:03}{line_index:03}.name
                # line.{file_index:03}{line_index:03}.content.added
                # line.{file_index:03}{line_index:03}.content.removed
                for name, value in config.items(section):
                    comps = name.split('.')
                    if comps[0] == 'line':
                        index = comps[1]  # '00001', '00002', etc
                        line_type = comps[2]  # 'name' or 'content'
                        # Bare 'content' (no action suffix) means 'added'.
                        if len(comps) == 3 and line_type == 'content':
                            comps.append('added')
                        if line_type == 'content':
                            action = comps[3]  # 'added' or 'removed'
                        associated_lines.setdefault(index, {})
                        if line_type == 'name':
                            associated_lines[index][line_type] = value
                        elif line_type == 'content':
                            associated_lines[index].setdefault(line_type, {})[
                                action
                            ] = value
                for f in associated_lines.values():
                    aggregated_lines.setdefault(
                        f['name'], []
                    ).append(f['content'])
                self.logger.debug(
                    'getLines: aggregated_lines = %s',
                    aggregated_lines,
                )
                return aggregated_lines

            for uninstall_group in [
                x.strip()
                for x in self.environment[
                    osetupcons.RemoveEnv.REMOVE_GROUPS
                ].split(',')
                if x.strip()
            ]:
                uninstall_section = (
                    osetupcons.Const.FILE_GROUP_SECTION_PREFIX +
                    uninstall_group
                )
                if config.has_section(uninstall_section):
                    # section could be missing in a conf file, for example if
                    # PKI config was not done because already existing
                    self._files.update(
                        getFiles(uninstall_section)
                    )
                    self._lines.update(
                        getLines(uninstall_section)
                    )
            if config.has_section('unremovable'):
                unremovable.update(getFiles('unremovable'))

        # Files marked unremovable are excluded no matter which groups
        # were selected.
        self._toremove = set(self._files.keys()) - set(unremovable.keys())
        # Detect files modified since setup (md5 mismatch).
        changed = []
        for f in self._toremove:
            if os.path.exists(f):
                if self._digestFile(f) != self._files[f]:
                    changed.append(f)
        self.logger.debug('changed=%s', changed)
        if changed:
            if self.environment[osetupcons.RemoveEnv.REMOVE_CHANGED] is None:
                self.environment[
                    osetupcons.RemoveEnv.REMOVE_CHANGED
                ] = dialog.queryBoolean(
                    dialog=self.dialog,
                    name='OVESETUP_ENGINE_REMOVE_CHANGED',
                    note=_(
                        'The following files were changed since setup:\n'
                        '{files}\n'
                        'Remove them anyway? '
                        '(@VALUES@) [@DEFAULT@]: '
                    ).format(
                        files='\n'.join(changed),
                    ),
                    prompt=True,
                    true=_('Yes'),
                    false=_('No'),
                    default=True,
                )
            if not self.environment[osetupcons.RemoveEnv.REMOVE_CHANGED]:
                self._toremove -= set(changed)
        self._tomodifylines = self._lines.keys()
        self.logger.debug('tomodifylines=%s', self._tomodifylines)
        self.logger.debug('files=%s', self._files)
        self.logger.debug('unremovable=%s', unremovable)
        self.logger.debug('toremove=%s', self._toremove)
        self.environment[
            osetupcons.RemoveEnv.FILES_TO_REMOVE
        ] = self._toremove
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        priority=plugin.Stages.PRIORITY_LOW,
    )
    def _misc(self):
        """Execute the removal: delete files, revert line edits, and drop
        the consumed uninstall info files."""
        self.logger.info(_('Removing files'))
        for f in self._toremove:
            if os.path.exists(f):
                self._safeDelete(f)
            elif os.path.islink(f):
                # dead link
                self._safeDelete(f)
        self.logger.info(_('Reverting changes to files'))
        for f in self._tomodifylines:
            if os.path.exists(f):
                self._revertChanges(f, self._lines[f])
        # The uninstall metadata itself is no longer needed.
        for info in self._infos:
            self._safeDelete(info)
    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
    )
    def _closeup(self):
        """Summarize which file groups were left untouched."""
        all_groups = set(self._descriptions.keys())
        uninstalled_groups = set([
            x.strip()
            for x in self.environment[
                osetupcons.RemoveEnv.REMOVE_GROUPS
            ].split(',')
            if x.strip()
        ])
        not_uninstalled = set(all_groups - uninstalled_groups)
        for group in not_uninstalled:
            self.dialog.note(
                text=_(
                    '{description} files not removed'
                ).format(
                    description=self._descriptions[group],
                ),
            )
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
Zazcallabah/dsfp | tests/test_tools.py | 2 | 1168 | # coding: utf-8
import six
from unittest import TestCase
from dsfp.tools import BinDiff
__all__ = ['TestBinDiff', ]
class TestBinDiff(TestCase):
    """Tools tests """
    maxDiff = None
    # Two 12-byte blobs differing at offsets 0-1, 4, 6 and 8.
    diff_a = six.b('\x00\x01\x00\x04\x44\x53\x60\x34\x90\x88\x13\x08')
    diff_b = six.b('\x01\x00\x00\x04\x42\x53\x61\x34\x93\x88\x13\x08')
    # Instructs BinDiff to ignore the first two bytes of the blobs.
    skip_tables = (
        {
            'SKIP_TABLE': [
                {"comment": "unknown data", "offset": 0, "size": 2},
            ]
        },
    )
    # Expected diff without a skip table: every differing region.
    diff = [
        {'diff': [0, 1], 'offset': 0},
        {'diff': [0, 2], 'offset': 4},
        {'diff': [0], 'offset': 8}
    ]
    # Expected diff once offsets 0-1 are masked out by the skip table.
    diff_patched = [
        {'diff': [0, 2], 'offset': 4},
        {'diff': [0], 'offset': 8}
    ]

    def setUp(self):
        pass

    def test_bin_diff_match(self):
        # Full diff, no regions skipped.
        diff_obj = BinDiff(self.diff_a, self.diff_b)
        diff = diff_obj.process_diff()
        self.assertEqual(diff, self.diff)

    def test_bin_diff_patch_table(self):
        # Same inputs with the skip table applied.
        diff_obj = BinDiff(self.diff_a, self.diff_b,
                           skip_tables=self.skip_tables)
        diff = diff_obj.process_diff()
        self.assertEqual(diff, self.diff_patched)
t0mk/ansible | lib/ansible/modules/identity/ipa/ipa_sudorule.py | 20 | 16243 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ipa_sudorule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo rule
description:
- Add, modify or delete sudo rule within IPA server using IPA API.
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
cmdcategory:
description:
- Command category the rule applies to.
choices: ['all']
required: false
cmd:
description:
- List of commands assigned to the rule.
- If an empty list is passed all commands will be removed from the rule.
- If option is omitted commands will not be checked or changed.
required: false
host:
description:
- List of hosts assigned to the rule.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
- Option C(hostcategory) must be omitted to assign hosts.
required: false
hostcategory:
description:
- Host category the rule applies to.
- If 'all' is passed one must omit C(host) and C(hostgroup).
- Option C(host) and C(hostgroup) must be omitted to assign 'all'.
choices: ['all']
required: false
hostgroup:
description:
- List of host groups assigned to the rule.
- If an empty list is passed all host groups will be removed from the rule.
- If option is omitted host groups will not be checked or changed.
- Option C(hostcategory) must be omitted to assign host groups.
required: false
user:
description:
- List of users assigned to the rule.
- If an empty list is passed all users will be removed from the rule.
- If option is omitted users will not be checked or changed.
required: false
usercategory:
description:
- User category the rule applies to.
choices: ['all']
required: false
usergroup:
description:
- List of user groups assigned to the rule.
- If an empty list is passed all user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
required: false
state:
description: State to ensure
required: false
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure a sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
- ipa_sudorule:
name: sudo_all_nopasswd
cmdcategory: all
description: Allow to run every command with sudo without password
hostcategory: all
sudoopt:
- '!authenticate'
usercategory: all
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
- ipa_sudorule:
name: sudo_dev_dbserver
description: Allow developers to run every command with sudo on all database server
cmdcategory: all
host:
- db01.example.com
hostgroup:
- db-server
sudoopt:
- '!authenticate'
usergroup:
- developers
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudorule:
description: Sudorule as returned by IPA
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.ipa import IPAClient
class SudoRuleIPAClient(IPAClient):
    """Thin wrapper around the IPA JSON-RPC API for sudorule objects.

    Every method maps one-to-one onto a `sudorule_*` server-side command;
    the heavy lifting (HTTP, session, serialisation) lives in IPAClient.
    """

    def __init__(self, module, host, port, protocol):
        super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)

    # --- lookup / lifecycle -------------------------------------------------

    def sudorule_find(self, name):
        """Find a sudo rule by cn; name=None because cn is part of the criteria."""
        criteria = {'all': True, 'cn': name}
        return self._post_json(method='sudorule_find', name=None, item=criteria)

    def sudorule_add(self, name, item):
        """Create a new sudo rule with the given attributes."""
        return self._post_json(method='sudorule_add', name=name, item=item)

    def sudorule_mod(self, name, item):
        """Modify attributes of an existing sudo rule."""
        return self._post_json(method='sudorule_mod', name=name, item=item)

    def sudorule_del(self, name):
        """Delete a sudo rule."""
        return self._post_json(method='sudorule_del', name=name)

    # --- sudo options -------------------------------------------------------

    def sudorule_add_option(self, name, item):
        return self._post_json(method='sudorule_add_option', name=name, item=item)

    def sudorule_add_option_ipasudoopt(self, name, item):
        """Add a raw sudoers option (e.g. '!authenticate') to the rule."""
        return self.sudorule_add_option(name=name, item={'ipasudoopt': item})

    def sudorule_remove_option(self, name, item):
        return self._post_json(method='sudorule_remove_option', name=name, item=item)

    def sudorule_remove_option_ipasudoopt(self, name, item):
        """Remove a raw sudoers option from the rule."""
        return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})

    # --- host membership ----------------------------------------------------

    def sudorule_add_host(self, name, item):
        return self._post_json(method='sudorule_add_host', name=name, item=item)

    def sudorule_add_host_host(self, name, item):
        """Attach individual hosts to the rule."""
        return self.sudorule_add_host(name=name, item={'host': item})

    def sudorule_add_host_hostgroup(self, name, item):
        """Attach host groups to the rule."""
        return self.sudorule_add_host(name=name, item={'hostgroup': item})

    def sudorule_remove_host(self, name, item):
        return self._post_json(method='sudorule_remove_host', name=name, item=item)

    def sudorule_remove_host_host(self, name, item):
        """Detach individual hosts from the rule."""
        return self.sudorule_remove_host(name=name, item={'host': item})

    def sudorule_remove_host_hostgroup(self, name, item):
        """Detach host groups from the rule."""
        return self.sudorule_remove_host(name=name, item={'hostgroup': item})

    # --- commands -----------------------------------------------------------

    def sudorule_add_allow_command(self, name, item):
        return self._post_json(method='sudorule_add_allow_command', name=name, item=item)

    def sudorule_remove_allow_command(self, name, item):
        return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)

    # --- user membership ----------------------------------------------------

    def sudorule_add_user(self, name, item):
        return self._post_json(method='sudorule_add_user', name=name, item=item)

    def sudorule_add_user_user(self, name, item):
        """Attach individual users to the rule."""
        return self.sudorule_add_user(name=name, item={'user': item})

    def sudorule_add_user_group(self, name, item):
        """Attach user groups to the rule."""
        return self.sudorule_add_user(name=name, item={'group': item})

    def sudorule_remove_user(self, name, item):
        return self._post_json(method='sudorule_remove_user', name=name, item=item)

    def sudorule_remove_user_user(self, name, item):
        """Detach individual users from the rule."""
        return self.sudorule_remove_user(name=name, item={'user': item})

    def sudorule_remove_user_group(self, name, item):
        """Detach user groups from the rule."""
        return self.sudorule_remove_user(name=name, item={'group': item})
def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None):
    """Assemble the sudorule attribute dict, omitting any option left unset.

    Only keys whose value is not None appear in the result, so omitted module
    options are never sent to (or diffed against) the IPA server.
    """
    candidates = (('cmdcategory', cmdcategory),
                  ('description', description),
                  ('hostcategory', hostcategory),
                  ('ipaenabledflag', ipaenabledflag),
                  ('usercategory', usercategory))
    return dict((key, value) for key, value in candidates if value is not None)
def get_sudorule_diff(ipa_sudorule, module_sudorule):
    """Return the keys of module_sudorule whose values differ from the server's.

    IPA returns multi-valued attributes as lists, so a scalar desired value is
    wrapped in a list and both sides are sorted before comparing, making the
    comparison order-insensitive.
    """
    differing_keys = []
    for key in module_sudorule.keys():
        wanted = module_sudorule.get(key, None)
        current = ipa_sudorule.get(key, None)
        if isinstance(current, list):
            if not isinstance(wanted, list):
                wanted = [wanted]
            current = sorted(current)
            wanted = sorted(wanted)
        if current != wanted:
            differing_keys.append(key)
    return differing_keys
def category_changed(module, client, category_name, ipa_sudorule):
    """Clear a category currently set to 'all' so explicit members can be added.

    Returns True when the category had to be cleared (a change), False
    otherwise. In check mode the change is reported but not performed.
    """
    if ipa_sudorule.get(category_name, None) != ['all']:
        return False
    if not module.check_mode:
        # cn is returned as list even with only a single value.
        client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
    return True
def ensure(module, client):
    """Drive the sudo rule towards the state requested by the module params.

    Creates, modifies, enables/disables or deletes the rule and reconciles
    its command, host, user and sudo-option membership lists.

    Returns a (changed, sudorule) tuple where sudorule is the rule as
    returned by IPA after all modifications (None if it was deleted).
    """
    state = module.params['state']
    name = module.params['name']
    cmd = module.params['cmd']
    cmdcategory = module.params['cmdcategory']
    host = module.params['host']
    hostcategory = module.params['hostcategory']
    hostgroup = module.params['hostgroup']

    # 'present' and 'enabled' both mean the rule must be active.
    if state in ['present', 'enabled']:
        ipaenabledflag = 'TRUE'
    else:
        ipaenabledflag = 'FALSE'

    sudoopt = module.params['sudoopt']
    user = module.params['user']
    usercategory = module.params['usercategory']
    usergroup = module.params['usergroup']
    module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
                                        description=module.params['description'],
                                        hostcategory=hostcategory,
                                        ipaenabledflag=ipaenabledflag,
                                        usercategory=usercategory)
    ipa_sudorule = client.sudorule_find(name=name)

    changed = False
    if state in ['present', 'disabled', 'enabled']:
        if not ipa_sudorule:
            changed = True
            if not module.check_mode:
                ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
        else:
            # BUG FIX: get_sudorule_diff() takes (ipa_sudorule, module_sudorule);
            # the previous call passed `client` as an extra first argument,
            # raising a TypeError whenever an existing rule was found.
            diff = get_sudorule_diff(ipa_sudorule, module_sudorule)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    if 'hostcategory' in diff:
                        # hostcategory='all' cannot coexist with explicit host
                        # members, so drop them before applying the change.
                        if ipa_sudorule.get('memberhost_host', None) is not None:
                            client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
                        if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
                            client.sudorule_remove_host_hostgroup(name=name,
                                                                  item=ipa_sudorule.get('memberhost_hostgroup'))
                    client.sudorule_mod(name=name, item=module_sudorule)

        # Reconcile membership lists; clearing a category counts as a change.
        if cmd is not None:
            changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
            if not module.check_mode:
                client.sudorule_add_allow_command(name=name, item=cmd)
        if host is not None:
            changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
                                            client.sudorule_add_host_host,
                                            client.sudorule_remove_host_host) or changed
        if hostgroup is not None:
            changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
                                            client.sudorule_add_host_hostgroup,
                                            client.sudorule_remove_host_hostgroup) or changed
        if sudoopt is not None:
            changed = client.modify_if_diff(name, ipa_sudorule.get('ipasudoopt', []), sudoopt,
                                            client.sudorule_add_option_ipasudoopt,
                                            client.sudorule_remove_option_ipasudoopt) or changed
        if user is not None:
            changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
                                            client.sudorule_add_user_user,
                                            client.sudorule_remove_user_user) or changed
        if usergroup is not None:
            changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
                                            client.sudorule_add_user_group,
                                            client.sudorule_remove_user_group) or changed
    else:
        # state == 'absent': remove the rule if it exists.
        if ipa_sudorule:
            changed = True
            if not module.check_mode:
                client.sudorule_del(name)
    return changed, client.sudorule_find(name)
def main():
    """Module entry point: declare the interface, log in and apply the rule."""
    argument_spec = dict(
        cmd=dict(type='list', required=False),
        cmdcategory=dict(type='str', required=False, choices=['all']),
        cn=dict(type='str', required=True, aliases=['name']),
        description=dict(type='str', required=False),
        host=dict(type='list', required=False),
        hostcategory=dict(type='str', required=False, choices=['all']),
        hostgroup=dict(type='list', required=False),
        sudoopt=dict(type='list', required=False),
        state=dict(type='str', required=False, default='present',
                   choices=['present', 'absent', 'enabled', 'disabled']),
        user=dict(type='list', required=False),
        usercategory=dict(type='str', required=False, choices=['all']),
        usergroup=dict(type='list', required=False),
        ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
        ipa_host=dict(type='str', required=False, default='ipa.example.com'),
        ipa_port=dict(type='int', required=False, default=443),
        ipa_user=dict(type='str', required=False, default='admin'),
        ipa_pass=dict(type='str', required=True, no_log=True),
        validate_certs=dict(type='bool', required=False, default=True),
    )
    # A category of 'all' already covers everything of that kind, so it is
    # mutually exclusive with listing explicit members of the same kind.
    mutually_exclusive = [['cmdcategory', 'cmd'],
                          ['hostcategory', 'host'],
                          ['hostcategory', 'hostgroup'],
                          ['usercategory', 'user'],
                          ['usercategory', 'usergroup']]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    client = SudoRuleIPAClient(module=module,
                               host=module.params['ipa_host'],
                               port=module.params['ipa_port'],
                               protocol=module.params['ipa_prot'])
    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, sudorule = ensure(module, client)
        module.exit_json(changed=changed, sudorule=sudorule)
    except Exception:
        # get_exception() is the pre-Python-3 ansible idiom for portability.
        e = get_exception()
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
RobLoach/lutris | lutris/gui/sidebar.py | 1 | 6274 | from gi.repository import Gtk, GdkPixbuf, GObject
from lutris import runners
from lutris import platforms
from lutris import pga
from lutris.gui.runnerinstalldialog import RunnerInstallDialog
from lutris.gui.config_dialogs import RunnerConfigDialog
from lutris.gui.runnersdialog import RunnersDialog
from lutris.gui.widgets import get_runner_icon
# Column indices into the sidebar's Gtk.TreeStore model.
TYPE = 0   # row kind: 'runners' or 'platforms' (hidden column)
SLUG = 1   # runner slug / platform name (hidden column)
ICON = 2   # runner icon pixbuf
LABEL = 3  # human-readable label shown to the user
class SidebarTreeView(Gtk.TreeView):
    """Sidebar tree listing installed runners and used platforms as filters."""

    def __init__(self):
        super(SidebarTreeView, self).__init__()
        self.installed_runners = []   # names of runners currently installed
        self.active_platforms = []    # platforms with at least one game in the PGA
        # Backing model columns match the TYPE/SLUG/ICON/LABEL constants.
        self.model = Gtk.TreeStore(str, str, GdkPixbuf.Pixbuf, str)
        # Wrap the store in a filter so only installed/used entries are shown.
        self.model_filter = self.model.filter_new()
        self.model_filter.set_visible_func(self.filter_rule)
        self.set_model(self.model_filter)
        column = Gtk.TreeViewColumn("Runners")
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        # Type
        type_renderer = Gtk.CellRendererText()
        type_renderer.set_visible(False)
        column.pack_start(type_renderer, True)
        column.add_attribute(type_renderer, "text", TYPE)
        # Runner slug
        text_renderer = Gtk.CellRendererText()
        text_renderer.set_visible(False)
        column.pack_start(text_renderer, True)
        column.add_attribute(text_renderer, "text", SLUG)
        # Icon
        icon_renderer = Gtk.CellRendererPixbuf()
        icon_renderer.set_property('width', 20)
        column.pack_start(icon_renderer, False)
        column.add_attribute(icon_renderer, "pixbuf", ICON)
        # Label
        text_renderer2 = Gtk.CellRendererText()
        column.pack_start(text_renderer2, True)
        column.add_attribute(text_renderer2, "text", LABEL)
        self.append_column(column)
        self.set_headers_visible(False)
        self.set_fixed_height_mode(True)
        self.get_selection().set_mode(Gtk.SelectionMode.BROWSE)
        self.connect('button-press-event', self.popup_contextual_menu)
        # Refresh the filter whenever a runner gets installed via the dialog.
        GObject.add_emission_hook(RunnersDialog, "runner-installed", self.update)
        self.runners = sorted(runners.__all__)
        self.platforms = sorted(platforms.__all__)
        self.platform_node = None
        self.load_runners()
        self.load_platforms()
        self.update()
        self.expand_all()

    def load_runners(self):
        """Append runners to the model."""
        # Parent row acting as the "no filter" choice for runners.
        self.runner_node = self.model.append(None, ['runners', '', None, "All runners"])
        for slug in self.runners:
            self.add_runner(slug)

    def add_runner(self, slug):
        """Append a single runner row (icon + human name) under the runner node."""
        name = runners.import_runner(slug).human_name
        icon = get_runner_icon(slug, format='pixbuf', size=(16, 16))
        self.model.append(self.runner_node, ['runners', slug, icon, name])

    def load_platforms(self):
        """Update platforms in the model."""
        # Parent row acting as the "no filter" choice for platforms.
        self.platform_node = self.model.append(None, ['platforms', '', None, "All platforms"])
        for platform in self.platforms:
            self.add_platform(platform)

    def add_platform(self, name):
        """Append a single platform row (no icon) under the platform node."""
        self.model.append(self.platform_node, ['platforms', name, None, name])

    def get_selected_filter(self):
        """Return the selected runner's name."""
        selection = self.get_selection()
        if not selection:
            return
        model, iter = selection.get_selected()
        if not iter:
            return
        type = model.get_value(iter, TYPE)
        slug = model.get_value(iter, SLUG)
        return (type, slug)

    def filter_rule(self, model, iter, data):
        """Visibility callback: show headers, installed runners, used platforms."""
        if not model[iter][0]:
            return False
        # The two parent rows (empty slug) are always visible.
        if (model[iter][0] == 'runners' or model[iter][0] == 'platforms') and model[iter][1] == '':
            return True
        return (model[iter][0] == 'runners' and model[iter][1] in self.installed_runners) or \
            (model[iter][0] == 'platforms' and model[iter][1] in self.active_platforms)

    def update(self, *args):
        """Re-query installed runners / used platforms and refresh the filter."""
        self.installed_runners = [runner.name for runner in runners.get_installed()]
        self.active_platforms = pga.get_used_platforms()
        self.model_filter.refilter()
        self.expand_all()
        # Return False here because this method is called with GLib.idle_add
        return False

    def popup_contextual_menu(self, view, event):
        """Open the runner context menu on right click (button 3) only."""
        if event.button != 3:
            return
        view.current_path = view.get_path_at_pos(event.x, event.y)
        if view.current_path:
            view.set_cursor(view.current_path[0])
            type, slug = self.get_selected_filter()
            # Only runner rows (not platforms or the header rows) get a menu.
            if type != 'runners' or not slug or slug not in self.runners:
                return
            menu = ContextualMenu()
            menu.popup(event, slug, self.get_toplevel())
class ContextualMenu(Gtk.Menu):
    """Right-click menu offering configure/manage/run actions for a runner."""

    def __init__(self):
        super(ContextualMenu, self).__init__()

    def add_menuitems(self, entries):
        """Append menu items built from (name, label, callback) tuples."""
        for entry in entries:
            name = entry[0]
            label = entry[1]
            action = Gtk.Action(name=name, label=label)
            action.connect('activate', entry[2])
            menuitem = action.create_menu_item()
            menuitem.action_id = name
            self.append(menuitem)

    def popup(self, event, runner_slug, parent_window):
        """Rebuild the menu for the given runner and show it at the event position."""
        # Instantiate the runner so its capabilities can be queried below.
        self.runner = runners.import_runner(runner_slug)()
        self.parent_window = parent_window
        # Clear existing menu
        for item in self.get_children():
            self.remove(item)
        # Add items
        entries = [('configure', 'Configure', self.on_configure_runner)]
        if self.runner.multiple_versions:
            entries.append(('versions', 'Manage versions',
                            self.on_manage_versions))
        if self.runner.runnable_alone:
            entries.append(('run', 'Run', self.runner.run))
        self.add_menuitems(entries)
        self.show_all()
        super(ContextualMenu, self).popup(None, None, None, None,
                                          event.button, event.time)

    def on_configure_runner(self, *args):
        """Open the runner configuration dialog."""
        RunnerConfigDialog(self.runner, parent=self.parent_window)

    def on_manage_versions(self, *args):
        """Open the dialog for installing/removing runner versions."""
        dlg_title = "Manage %s versions" % self.runner.name
        RunnerInstallDialog(dlg_title, self.parent_window, self.runner.name)
| gpl-3.0 |
ubiar/odoo | addons/base_gengo/__init__.py | 377 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
balajiln/mondrianforest | src/mondrianforest.py | 1 | 72853 | #!/usr/bin/env python
#
# Example usage:
#
# NOTE:
# optype=real: Gaussian parametrization uses a non-linear transformation of split times
# variance should decrease as split_time increases:
# variance at node j = variance_coef * (sigmoid(sigmoid_coef * t_j) - sigmoid(sigmoid_coef * t_{parent(j)}))
# non-linear transformation should be a monotonically non-decreasing function
# sigmoid has a saturation effect: children will be similar to parent as we go down the tree
# split times t_j scales inversely with the number of dimensions
import sys
import os
import optparse
import math
import time
import cPickle as pickle
import random
import pprint as pp
import numpy as np
from warnings import warn
from utils import hist_count, logsumexp, softmax, sample_multinomial, \
sample_multinomial_scores, empty, assert_no_nan, check_if_zero, check_if_one, \
multiply_gaussians, divide_gaussians, sigmoid, logsumexp_array
from mondrianforest_utils import Forest, Param, parser_add_common_options, parser_check_common_options, \
bootstrap, parser_add_mf_options, parser_check_mf_options, reset_random_seed, \
load_data, add_stuff_2_settings, compute_gaussian_pdf, compute_gaussian_logpdf, \
get_filename_mf, precompute_minimal, compute_left_right_statistics, \
create_prediction_tree, init_prediction_tree, update_predictive_posterior_node, \
compute_metrics_classification, compute_metrics_regression, \
update_posterior_node_incremental, init_update_posterior_node_incremental
from itertools import izip, count, chain
from collections import defaultdict
try:
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import rc
rc('font', **{'family':'serif'})
rc('text', usetex=True)
rc('legend', handlelength=4)
rc('legend', **{'fontsize':9})
except:
warn('matplotlib not loaded: plotting not possible; set draw_mondrian=0')
try:
import pydot
except:
warn('pydot not loaded: tree will not be printed; set draw_mondrian=0')
# setting numpy options to debug RuntimeWarnings
#np.seterr(divide='raise')
np.seterr(divide='ignore') # to avoid warnings for np.log(0)
np.seterr(invalid='ignore') # to avoid warnings for inf * 0 = nan
np.set_printoptions(precision=3)
np.set_printoptions(linewidth=200)
# color scheme for mondrian
# colors_list = ['DarkRed', 'Navy', 'DimGray', 'Beige']
# other nice colors: Beige, MediumBlue, DarkRed vs FireBrick
colors_list = ['LightGray'] # paused leaf will always be shaded gray
LW = 2
FIGSIZE = (12, 9)
INF = np.inf
def process_command_line():
    """Parse and validate command-line options, returning the settings object.

    For classification a normalized stable prior is forced (alpha = 0) and
    hierarchical smoothing must be enabled. A negative budget is interpreted
    as an infinite Mondrian lifetime.
    """
    parser = parser_add_mf_options(parser_add_common_options())
    settings, _ = parser.parse_args()
    add_stuff_2_settings(settings)
    if settings.optype == 'class':
        settings.alpha = 0  # normalized stable prior
        assert settings.smooth_hierarchically
    parser_check_common_options(parser, settings)
    parser_check_mf_options(parser, settings)
    settings.budget_to_use = INF if settings.budget < 0 else settings.budget
    return settings
class MondrianBlock(object):
    """
    defines Mondrian block
    variables:
    - min_d         : dimension-wise min of training data in current block
    - max_d         : dimension-wise max of training data in current block
    - range_d       : max_d - min_d
    - sum_range_d   : sum of range_d
    - left          : id of left child
    - right         : id of right child
    - parent        : id of parent
    - is_leaf       : boolean variable to indicate if current block is leaf
    - budget        : remaining lifetime for subtree rooted at current block
                      = lifetime of Mondrian - time of split of parent
                      NOTE: time of split of parent of root node is 0
    """
    def __init__(self, data, settings, budget, parent, range_stats):
        # range_stats caches the extent of the training data in this block.
        (self.min_d, self.max_d,
         self.range_d, self.sum_range_d) = range_stats
        self.budget = budget + 0.  # force a float copy
        self.parent = parent
        # A freshly created block starts out as a childless leaf.
        self.left = self.right = None
        self.is_leaf = True
class MondrianTree(object):
"""
defines a Mondrian tree
variables:
- node_info : stores splits for internal nodes
- root : id of root node
- leaf_nodes : list of leaf nodes
- non_leaf_nodes: list of non-leaf nodes
- max_split_costs : max_split_cost for a node is time of split of node - time of split of parent
max_split_cost is drawn from an exponential
- train_ids : list of train ids stored for paused Mondrian blocks
- counts : stores histogram of labels at each node (when optype = 'class')
- grow_nodes : list of Mondrian blocks that need to be "grown"
functions:
- __init__ : initialize a Mondrian tree
- grow : samples Mondrian block (more precisely, restriction of blocks to training data)
- extend_mondrian : extend a Mondrian to include new training data
- extend_mondrian_block : conditional Mondrian algorithm
"""
    def __init__(self, data=None, train_ids=None, settings=None, param=None, cache=None):
        """
        initialize Mondrian tree data structure and sample restriction of Mondrian tree to current training data
        data is a N x D numpy array containing the entire training data
        train_ids is the training ids of the first minibatch
        """
        if data is None:
            # empty constructor (e.g. when the tree is reconstructed later)
            return
        # root block covers the bounding box of the first minibatch
        root_node = MondrianBlock(data, settings, settings.budget_to_use, None, \
                get_data_range(data, train_ids))
        self.root = root_node
        self.non_leaf_nodes = []
        self.leaf_nodes = []
        self.node_info = {}          # split (feat_id, threshold) per internal node
        self.max_split_costs = {}    # exponential waiting time per node
        self.split_times = {}        # absolute split time per node
        self.train_ids = {root_node: train_ids}
        self.copy_params(param, settings)
        init_prediction_tree(self, settings)
        if cache:
            # seed sufficient statistics at the root from precomputed cache
            if settings.optype == 'class':
                self.counts = {root_node: cache['y_train_counts']}
            else:
                self.sum_y = {root_node: cache['sum_y']}
                self.sum_y2 = {root_node: cache['sum_y2']}
                self.n_points = {root_node: cache['n_points']}
            if settings.bagging == 1 or settings.n_minibatches > 1:
                init_update_posterior_node_incremental(self, data, param, settings, cache, root_node, train_ids)
        # sample the initial restriction of the Mondrian to the minibatch
        self.grow_nodes = [root_node]
        self.grow(data, settings, param, cache)
def copy_params(self, param, settings):
if settings.optype == 'real':
self.noise_variance = param.noise_variance + 0
self.noise_precision = param.noise_precision + 0
self.sigmoid_coef = param.sigmoid_coef + 0
self.variance_coef = param.variance_coef + 0
    def get_average_depth(self, settings, data):
        """
        compute average depth of tree (averaged over training data)
        = depth of a leaf weighted by fraction of training data at that leaf
        Also returns the mean leaf block size and, per depth, the mean block
        size (sum_range_d) over all nodes at that depth.
        """
        self.depth_nodes = {self.root: 0}
        tmp_node_list = [self.root]   # FIFO queue for breadth-first traversal
        n_total = 0.
        average_depth = 0.
        self.node_size_by_depth = defaultdict(list)
        leaf_node_sizes = []
        while True:
            try:
                node_id = tmp_node_list.pop(0)
            except IndexError:
                break
            if node_id.is_leaf:
                # number of training points at this leaf
                if settings.optype == 'class':
                    n_points_node = np.sum(self.counts[node_id])
                else:
                    n_points_node = self.n_points[node_id]
                n_total += n_points_node
                average_depth += n_points_node * self.depth_nodes[node_id]
                self.node_size_by_depth[self.depth_nodes[node_id]].append(node_id.sum_range_d)
            if not node_id.is_leaf:
                # children are one level deeper; enqueue them
                self.depth_nodes[node_id.left] = self.depth_nodes[node_id] + 1
                self.depth_nodes[node_id.right] = self.depth_nodes[node_id] + 1
                tmp_node_list.extend([node_id.left, node_id.right])
            else:
                leaf_node_sizes.append(node_id.sum_range_d)
        # every training point must be accounted for by exactly one leaf
        assert data['n_train'] == int(n_total)
        average_depth /= n_total
        average_leaf_node_size = np.mean(leaf_node_sizes)
        average_node_size_by_depth = {}
        for k in self.node_size_by_depth:
            average_node_size_by_depth[k] = np.mean(self.node_size_by_depth[k])
        return (average_depth, average_leaf_node_size, average_node_size_by_depth)
    def get_print_label_draw_tree(self, node_id, graph):
        """
        helper function for draw_tree using pydot
        Adds a pydot node for node_id to graph and returns (node_name, graph).
        Requires gen_node_ids_print() / gen_cumulative_split_costs_only() to
        have populated node_ids_print and cumulative_split_costs first.
        """
        name = self.node_ids_print[node_id]
        name2 = name
        if name2 == '':
            # pydot needs a non-empty node name; root ('' label) becomes 'e'
            name2 = 'e'
        if node_id.is_leaf:
            op = name
        else:
            # internal nodes show their split rule and split time
            feat_id, split = self.node_info[node_id]
            op = r'x_%d > %.2f\nt = %.2f' % (feat_id+1, split, self.cumulative_split_costs[node_id])
        if op == '':
            op = 'e'
        node = pydot.Node(name=name2, label=op)   # latex labels don't work
        graph.add_node(node)
        return (name2, graph)
    def draw_tree(self, data, settings, figure_id=0, i_t=0):
        """
        function to draw Mondrian tree using pydot
        NOTE: set ADD_TIME=True if you want want set edge length between parent and child
                to the difference in time of splits
        """
        self.gen_node_ids_print()
        self.gen_cumulative_split_costs_only(settings, data)
        graph = pydot.Dot(graph_type='digraph')
        # 'dummy' is the root's pydot name, reused for invisible layout edges
        dummy, graph = self.get_print_label_draw_tree(self.root, graph)
        ADD_TIME = False
        for node_id in self.non_leaf_nodes:
            parent, graph = self.get_print_label_draw_tree(node_id, graph)
            left, graph = self.get_print_label_draw_tree(node_id.left, graph)
            right, graph = self.get_print_label_draw_tree(node_id.right, graph)
            for child, child_id in izip([left, right], [node_id.left, node_id.right]):
                edge = pydot.Edge(parent, child)
                if ADD_TIME and (not child_id.is_leaf):
                    edge.set_minlen(self.max_split_costs[child_id])
                    # invisible edge from the root enforces vertical placement
                    # proportional to the cumulative split time
                    edge2 = pydot.Edge(dummy, child)
                    edge2.set_minlen(self.cumulative_split_costs[child_id])
                    edge2.set_style('invis')
                    graph.add_edge(edge2)
                graph.add_edge(edge)
        filename_plot_tag = get_filename_mf(settings)[:-2]
        if settings.save:
            tree_name = filename_plot_tag + '-mtree_minibatch-' + str(figure_id) + '.pdf'
            print 'saving file: %s' % tree_name
            graph.write_pdf(tree_name)
    def draw_mondrian(self, data, settings, figure_id=None, i_t=0):
        """
        function to draw Mondrian partitions; each Mondrian tree is one subplot.
        Only valid for 2-D normalized features and at most 10 trees.
        """
        assert data['n_dim'] == 2 and settings.normalize_features == 1 \
                and settings.n_mondrians <= 10
        self.gen_node_list()
        if settings.n_mondrians == 1 and settings.dataset == 'toy-mf':
            self.draw_tree(data, settings, figure_id, i_t)
        # lay subplots out on at most 2 rows
        if settings.n_mondrians > 2:
            n_row = 2
        else:
            n_row = 1
        n_col = int(math.ceil(settings.n_mondrians / n_row))
        if figure_id is None:
            figure_id = 0
        fig = plt.figure(figure_id)
        plt.hold(True)
        ax = plt.subplot(n_row, n_col, i_t+1, aspect='equal')
        EPS = 0.
        ax.set_xlim(xmin=0-EPS)
        ax.set_xlim(xmax=1+EPS)
        ax.set_ylim(ymin=0-EPS)
        ax.set_ylim(ymax=1+EPS)
        ax.autoscale(False)
        plt.xlabel(r'$x_1$')
        plt.ylabel(r'$x_2$')
        # draw every split line, clipped to its block's extent
        non_leaf_nodes = [self.root]
        while non_leaf_nodes:
            node_id = non_leaf_nodes.pop(0)
            try:
                feat_id, split = self.node_info[node_id]
            except:
                # leaf node: no split info
                continue
            left, right = node_id.left, node_id.right
            non_leaf_nodes.append(left)
            non_leaf_nodes.append(right)
            EXTRA = 0.0     # to show splits that separate 2 data points
            if feat_id == 1:
                # axhline doesn't work if you rescale
                ax.hlines(split, node_id.min_d[0] - EXTRA, node_id.max_d[0] + EXTRA, lw=LW, color='k')
            else:
                ax.vlines(split, node_id.min_d[1] - EXTRA, node_id.max_d[1] + EXTRA, lw=LW, color='k')
        # add "outer patch" that defines the extent (not data dependent)
        block = patches.Rectangle((0, 0), 1, 1, facecolor='white', edgecolor='gray', ls='dashed')
        ax.add_patch(block)
        for i_, node_id in enumerate(self.node_list):
            # plot only the block where Mondrian has been induced (limited by extent of training data)
            block = patches.Rectangle((node_id.min_d[0], node_id.min_d[1]), node_id.range_d[0], \
                    node_id.range_d[1], facecolor='white', edgecolor='gray')
            ax.add_patch(block)
        for i_, node_id in enumerate(self.leaf_nodes):
            # plot only the block where Mondrian has been induced (limited by extent of training data)
            block = patches.Rectangle((node_id.min_d[0], node_id.min_d[1]), node_id.range_d[0], \
                    node_id.range_d[1], facecolor=colors_list[i_ % len(colors_list)], edgecolor='black')
            ax.add_patch(block)
            # zorder = 1 will make points inside the blocks invisible, >= 2 will make them visible
            x_train = data['x_train'][self.train_ids[node_id], :]
            #ax.scatter(x_train[:, 0], x_train[:, 1], color='k', marker='x', s=10, zorder=2)
            color_y = 'rbk'
            for y_ in range(data['n_class']):
                idx = data['y_train'][self.train_ids[node_id]] == y_
                ax.scatter(x_train[idx, 0], x_train[idx, 1], color=color_y[y_], marker='o', s=16, zorder=2)
        plt.draw()
def gen_node_ids_print(self):
"""
generate binary string label for each node
root_node is denoted by empty string "e"
all other node labels are defined as follows: left(j) = j0, right(j) = j1
e.g. left and right child of root_node are 0 and 1 respectively,
left and right of node 0 are 00 and 01 respectively and so on.
"""
node_ids = [self.root]
self.node_ids_print = {self.root: ''}
while node_ids:
node_id = node_ids.pop(0)
try:
feat_id, split = self.node_info[node_id]
left, right = node_id.left, node_id.right
node_ids.append(left)
node_ids.append(right)
self.node_ids_print[left] = self.node_ids_print[node_id] + '0'
self.node_ids_print[right] = self.node_ids_print[node_id] + '1'
except KeyError:
continue
    def print_dict(self, d):
        """
        print a dictionary keyed by nodes, using the human-readable binary
        labels from gen_node_ids_print() (must be called beforehand)
        """
        for k in d:
            print '\tk_map = %10s, val = %s' % (self.node_ids_print[k], d[k])
    def print_list(self, list_):
        """
        print a list of nodes using their binary labels
        (gen_node_ids_print() must have been called beforehand)
        """
        print '\t%s' % ([self.node_ids_print[x] for x in list_])
    def print_tree(self, settings):
        """
        prints some tree statistics: leaf nodes, non-leaf nodes, information and so on
        """
        self.gen_node_ids_print()
        print 'printing tree:'
        print 'len(leaf_nodes) = %s, len(non_leaf_nodes) = %s' \
                % (len(self.leaf_nodes), len(self.non_leaf_nodes))
        print 'node_info ='
        # breadth-first walk over internal nodes (leaves raise KeyError below)
        node_ids = [self.root]
        while node_ids:
            node_id = node_ids.pop(0)
            node_id_print = self.node_ids_print[node_id]
            try:
                feat_id, split = self.node_info[node_id]
                print '%10s, feat = %5d, split = %.2f, node_id = %s' % \
                        (node_id_print, feat_id, split, node_id)
                if settings.optype == 'class':
                    print 'counts = %s' % self.counts[node_id]
                else:
                    print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id])
                left, right = node_id.left, node_id.right
                node_ids.append(left)
                node_ids.append(right)
            except KeyError:
                # leaf node: has no entry in node_info
                continue
        print 'leaf info ='
        for node_id in self.leaf_nodes:
            node_id_print = self.node_ids_print[node_id]
            print '%10s, train_ids = %s, node_id = %s' % \
                    (node_id_print, self.train_ids[node_id], node_id)
            if settings.optype == 'class':
                print 'counts = %s' % self.counts[node_id]
            else:
                print 'n_points = %6d, sum_y = %.2f' % (self.n_points[node_id], self.sum_y[node_id])
def check_if_labels_same(self, node_id):
"""
checks if all labels in a node are identical
"""
return np.count_nonzero(self.counts[node_id]) == 1
def pause_mondrian(self, node_id, settings):
"""
should you pause a Mondrian block or not?
pause if sum_range_d == 0 (important for handling duplicates) or
- optype == class: pause if all labels in a node are identical
- optype == real: pause if n_points < min_samples_split
"""
if settings.optype == 'class':
#pause_mondrian_tmp = self.check_if_labels_same(node_id)
if self.check_if_labels_same(node_id):
pause_mondrian_tmp = True
else:
pause_mondrian_tmp = (np.sum(self.counts[node_id]) < settings.min_samples_split)
else:
pause_mondrian_tmp = self.n_points[node_id] < settings.min_samples_split
pause_mondrian = pause_mondrian_tmp or (node_id.sum_range_d == 0)
return pause_mondrian
def get_parent_split_time(self, node_id, settings):
if node_id == self.root:
return 0.
else:
return self.split_times[node_id.parent]
    def update_gaussian_hyperparameters(self, param, data, settings):
        """
        set the Gaussian prior/noise hyperparameters from the empirical mean
        and variance of the targets at the root, then copy them onto the tree.
        The K and s factors can be overridden via 'kfactor-X-' / 'sfactor-X-'
        substrings in settings.tag.
        """
        n_points = float(self.n_points[self.root])
        # empirical mean/variance of targets over all training data
        param.prior_mean = self.sum_y[self.root] / n_points
        param.prior_variance = self.sum_y2[self.root] / n_points \
                                - param.prior_mean ** 2
        param.prior_precision = 1.0 / param.prior_variance
        # TODO: estimate K using estimate of noise variance at leaf nodes?
        # TODO: need to do this once for forest, rather than for each tree
        # FIXME very very hacky, surely a better way to tune this?
        if 'sfactor' in settings.tag:
            # parse the float between 'sfactor-' and the next '-'
            s_begin = settings.tag.find('sfactor-') + 8
            s_tmp = settings.tag[s_begin:]
            s_factor = float(s_tmp[:s_tmp.find('-')])
        else:
            s_factor = 2.0
        if 'kfactor' in settings.tag:
            # parse the float between 'kfactor-' and the next '-'
            k_begin = settings.tag.find('kfactor-') + 8
            k_tmp = settings.tag[k_begin:]
            k_factor = float(k_tmp[:k_tmp.find('-')])
        else:
            k_factor = min(2 * n_points, 500)   # noise variance is 1/K times prior_variance
        if k_factor <= 0.:
            K = 2. * n_points
        else:
            K = k_factor
        param.noise_variance = param.prior_variance / K
        param.noise_precision = 1.0 / param.noise_variance
        param.variance_coef = 2.0 * param.prior_variance * K / (K + 2.)
        # split-time sigmoid scale; see module header for the parametrization
        param.sigmoid_coef = data['n_dim'] / (s_factor * np.log2(n_points))
        # FIXME: important to copy over since prediction accesses hyperparameters in self
        self.copy_params(param, settings)
def get_node_mean_and_variance(self, node):
n_points = float(self.n_points[node])
node_mean = self.sum_y[node] / n_points
node_variance = self.sum_y2[node] / n_points - node_mean ** 2
return (node_mean, node_variance)
    def update_gaussian_hyperparameters_indep(self, param, data, settings):
        """
        Estimate per-tree hyperparameters for hierarchical Gaussian smoothing:
        prior mean/variance from the root, noise variance from leaf variances,
        sigmoid_coef from the deepest cumulative split time, and variance_coef
        from parent/child mean discrepancies.  Traverses the tree breadth-first.
        """
        n_points = float(self.n_points[self.root])
        self.prior_mean, self.prior_variance = self.get_node_mean_and_variance(self.root)
        self.prior_precision = 1.0 / self.prior_variance
        self.cumulative_split_costs = {}
        self.leaf_means = []
        self.leaf_variances = []
        # parallel per-node arrays collected during traversal (root included only if non-leaf)
        node_means = []
        d_node_means = {self.root: self.prior_mean}
        node_parent_means = []
        node_split_times = []
        node_parent_split_times = []
        if self.root.is_leaf:
            self.cumulative_split_costs[self.root] = 0.
            remaining = []
            self.max_split_time = 0.1   # NOTE: initial value, need to specify non-zero value
        else:
            self.cumulative_split_costs[self.root] = self.max_split_costs[self.root]
            remaining = [self.root.left, self.root.right]
            self.max_split_time = self.cumulative_split_costs[self.root] + 0
            node_split_times.append(self.cumulative_split_costs[self.root])
            node_parent_split_times.append(0.)
            node_means.append(self.prior_mean)
            node_parent_means.append(self.prior_mean)
        # breadth-first traversal; parent is always processed before its children,
        # so cumulative_split_costs[node_id.parent] is available below
        while True:
            try:
                node_id = remaining.pop(0)
            except IndexError:
                break
            self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \
                                                    + self.max_split_costs[node_id]
            node_mean, node_variance = self.get_node_mean_and_variance(node_id)
            node_split_times.append(self.cumulative_split_costs[node_id])
            node_parent_split_times.append(self.cumulative_split_costs[node_id.parent])
            node_means.append(node_mean)
            node_parent_means.append(d_node_means[node_id.parent])
            d_node_means[node_id] = node_mean
            if not node_id.is_leaf:
                remaining.append(node_id.left)
                remaining.append(node_id.right)
                self.max_split_time = max(self.max_split_time, self.cumulative_split_costs[node_id])
            else:
                self.leaf_means.append(node_mean)
                self.leaf_variances.append(node_variance)
        #self.noise_variance = np.max(self.leaf_variances)
        # noise variance estimated as the average within-leaf variance
        self.noise_variance = np.mean(self.leaf_variances)
        self.noise_precision = 1.0 / self.noise_variance
        self.sigmoid_coef = 3. / self.max_split_time
        #self.sigmoid_coef = data['n_dim']
        #self.sigmoid_coef = data['n_dim'] / 5
        #self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points))
        #self.sigmoid_coef = data['n_dim'] / (2. * np.log2(n_points))
        #self.sigmoid_coef = data['n_dim'] / (n_points)
        #self.variance_leaf_from_root = 2 * np.mean((np.array(self.leaf_means) - self.prior_mean) ** 2)
        # set sd to 3 times the empirical sd so that leaf node means are highly plausible (avoid too much smoothing)
        #self.variance_coef = 1.0 * self.variance_leaf_from_root
        if self.root.is_leaf:
            # single-node tree: no parent/child pairs to estimate from, fall back to 1.0
            self.variance_coef = 1.0
        else:
            # variance_coef estimated by moment matching: ratio of squared mean jumps
            # to the sigmoid-time increments between parent and child
            node_means = np.array(node_means)
            node_parent_means = np.array(node_parent_means)
            node_split_times = np.array(node_split_times)
            node_parent_split_times = np.array(node_parent_split_times)
            tmp_den = sigmoid(self.sigmoid_coef * node_split_times) \
                        - sigmoid(self.sigmoid_coef * node_parent_split_times)
            tmp_num = (node_means - node_parent_means) ** 2
            variance_coef_est = np.mean(tmp_num / tmp_den)
            self.variance_coef = variance_coef_est
            print 'sigmoid_coef = %.3f, variance_coef = %.3f' % (self.sigmoid_coef, variance_coef_est)
    def grow(self, data, settings, param, cache):
        """
        sample a Mondrian tree (each Mondrian block is restricted to range of training data in that block)

        Pops nodes off self.grow_nodes; each is either paused (stays a leaf with
        infinite split time) or split, with the two children pushed back onto
        self.grow_nodes.  Terminates when self.grow_nodes is empty.
        """
        if settings.debug:
            print 'entering grow'
        while self.grow_nodes:
            node_id = self.grow_nodes.pop(0)
            train_ids = self.train_ids[node_id]
            if settings.debug:
                print 'node_id = %s' % node_id
            pause_mondrian = self.pause_mondrian(node_id, settings)
            if settings.debug and pause_mondrian:
                print 'pausing mondrian at node = %s, train_ids = %s' % (node_id, self.train_ids[node_id])
            if pause_mondrian or (node_id.sum_range_d == 0): # BL: redundant now
                # paused (or zero-extent) block: no cut is sampled; infinite split cost
                split_cost = np.inf
                self.max_split_costs[node_id] = node_id.budget + 0
                self.split_times[node_id] = np.inf # FIXME: is this correct? inf or budget?
            else:
                # time to next cut is exponential in the linear dimension of the block
                split_cost = random.expovariate(node_id.sum_range_d)
                self.max_split_costs[node_id] = split_cost
                self.split_times[node_id] = split_cost + self.get_parent_split_time(node_id, settings)
            new_budget = node_id.budget - split_cost
            if node_id.budget > split_cost:
                # cut accepted: dimension chosen proportional to its range,
                # location uniform within that dimension's extent
                feat_id_chosen = sample_multinomial_scores(node_id.range_d)
                split_chosen = random.uniform(node_id.min_d[feat_id_chosen], \
                                node_id.max_d[feat_id_chosen])
                (train_ids_left, train_ids_right, cache_tmp) = \
                    compute_left_right_statistics(data, param, cache, train_ids, feat_id_chosen, split_chosen, settings)
                left = MondrianBlock(data, settings, new_budget, node_id, get_data_range(data, train_ids_left))
                right = MondrianBlock(data, settings, new_budget, node_id, get_data_range(data, train_ids_right))
                node_id.left, node_id.right = left, right
                self.grow_nodes.append(left)
                self.grow_nodes.append(right)
                self.train_ids[left] = train_ids_left
                self.train_ids[right] = train_ids_right
                if settings.optype == 'class':
                    self.counts[left] = cache_tmp['cnt_left_chosen']
                    self.counts[right] = cache_tmp['cnt_right_chosen']
                else:
                    self.sum_y[left] = cache_tmp['sum_y_left']
                    self.sum_y2[left] = cache_tmp['sum_y2_left']
                    self.n_points[left] = cache_tmp['n_points_left']
                    self.sum_y[right] = cache_tmp['sum_y_right']
                    self.sum_y2[right] = cache_tmp['sum_y2_right']
                    self.n_points[right] = cache_tmp['n_points_right']
                self.node_info[node_id] = [feat_id_chosen, split_chosen]
                self.non_leaf_nodes.append(node_id)
                node_id.is_leaf = False
                if not settings.draw_mondrian:
                    # train ids only needed for drawing; free memory otherwise
                    self.train_ids.pop(node_id)
            else:
                self.leaf_nodes.append(node_id)     # node_id.is_leaf set to True at init
def gen_cumulative_split_costs_only(self, settings, data):
"""
creates node_id.cumulative_split_cost as well as a dictionary self.cumulative_split_costs
helper function for draw_tree
"""
self.cumulative_split_costs = {}
if self.root.is_leaf:
self.cumulative_split_costs[self.root] = 0.
remaining = []
else:
self.cumulative_split_costs[self.root] = self.max_split_costs[self.root]
remaining = [self.root.left, self.root.right]
while True:
try:
node_id = remaining.pop(0)
except IndexError:
break
self.cumulative_split_costs[node_id] = self.cumulative_split_costs[node_id.parent] \
+ self.max_split_costs[node_id]
if not node_id.is_leaf:
remaining.append(node_id.left)
remaining.append(node_id.right)
def gen_node_list(self):
"""
generates an ordered node_list such that parent appears before children
useful for updating predictive posteriors
"""
self.node_list = [self.root]
i = -1
while True:
try:
i += 1
node_id = self.node_list[i]
except IndexError:
break
if not node_id.is_leaf:
self.node_list.extend([node_id.left, node_id.right])
    def predict_class(self, x_test, n_class, param, settings):
        """
        predict new label (for classification tasks)

        Returns an (n_test, n_class) array of predicted class probabilities.
        Walks the tree breadth-first; at each node a test point either
        "separates" from the block (spawning a virtual cut outside the block's
        extent) or continues down, and the two events' probabilities weight
        the contributions to pred_prob.
        """
        pred_prob = np.zeros((x_test.shape[0], n_class))
        prob_not_separated_yet = np.ones(x_test.shape[0])
        prob_separated = np.zeros(x_test.shape[0])
        node_list = [self.root]
        # d_idx_test maps each node to the indices of test points routed to it
        d_idx_test = {self.root: np.arange(x_test.shape[0])}
        while True:
            try:
                node_id = node_list.pop(0)
            except IndexError:
                break
            idx_test = d_idx_test[node_id]
            if len(idx_test) == 0:
                continue
            x = x_test[idx_test, :]
            # rate of separation = total distance of each test point outside the block's box
            expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1)
            prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id])
            prob_separated_now = 1 - prob_not_separated_now
            if math.isinf(self.max_split_costs[node_id]):
                # rare scenario where test point overlaps exactly with a training data point
                idx_zero = expo_parameter == 0
                # to prevent nan in computation above when test point overlaps with training data point
                prob_not_separated_now[idx_zero] = 1.
                prob_separated_now[idx_zero] = 0.
                # predictions for idx_test_zero
            # data dependent discounting (depending on how far test data point is from the mondrian block)
            idx_non_zero = expo_parameter > 0
            idx_test_non_zero = idx_test[idx_non_zero]
            expo_parameter_non_zero = expo_parameter[idx_non_zero]
            base = self.get_prior_mean(node_id, param, settings)
            if np.any(idx_non_zero):
                num_tables_k, num_customers, num_tables = self.get_counts(self.cnt[node_id])
                # expected discount (averaging over time of cut which is a truncated exponential)
                # discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) * \
                #             (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id]))
                discount = (expo_parameter_non_zero / (expo_parameter_non_zero + settings.discount_param)) \
                            * (-np.expm1(-(expo_parameter_non_zero + settings.discount_param) * self.max_split_costs[node_id])) \
                            / (-np.expm1(-expo_parameter_non_zero * self.max_split_costs[node_id]))
                discount_per_num_customers = discount / num_customers
                # Chinese-restaurant style posterior mean with per-point discount
                pred_prob_tmp = num_tables * discount_per_num_customers[:, np.newaxis] * base \
                        + self.cnt[node_id] / num_customers - discount_per_num_customers[:, np.newaxis] * num_tables_k
                if settings.debug:
                    check_if_one(np.sum(pred_prob_tmp))
                pred_prob[idx_test_non_zero, :] += prob_separated_now[idx_non_zero][:, np.newaxis] \
                                                    * prob_not_separated_yet[idx_test_non_zero][:, np.newaxis] * pred_prob_tmp
            prob_not_separated_yet[idx_test] *= prob_not_separated_now
            # predictions for idx_test_zero
            if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero):
                idx_test_zero = idx_test[idx_zero]
                pred_prob_node_id = self.compute_posterior_mean_normalized_stable(self.cnt[node_id], \
                                        self.get_discount_node_id(node_id, settings), base, settings)
                pred_prob[idx_test_zero, :] += prob_not_separated_yet[idx_test_zero][:, np.newaxis] * pred_prob_node_id
            try:
                # route the surviving test points to the children; leaves have no
                # node_info entry, so the KeyError branch ends recursion
                feat_id, split = self.node_info[node_id]
                cond = x[:, feat_id] <= split
                left, right = get_children_id(node_id)
                d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond]
                node_list.append(left)
                node_list.append(right)
            except KeyError:
                pass
        if True or settings.debug:
            # sanity check: rows of pred_prob must sum to one
            check_if_zero(np.sum(np.abs(np.sum(pred_prob, 1) - 1)))
        return pred_prob
    def predict_real(self, x_test, y_test, param, settings):
        """
        predict new label (for regression tasks)

        Returns (pred_mean, pred_var, pred_second_moment, log_pred_prob,
        pred_sample) for each test point.  Same separation/continuation
        machinery as predict_class, but mixing Gaussian moments instead of
        class probabilities.
        """
        pred_mean = np.zeros(x_test.shape[0])
        pred_second_moment = np.zeros(x_test.shape[0])
        pred_sample = np.zeros(x_test.shape[0])
        # log-probabilities accumulated via logsumexp; -inf == zero probability so far
        log_pred_prob = -np.inf * np.ones(x_test.shape[0])
        prob_not_separated_yet = np.ones(x_test.shape[0])
        prob_separated = np.zeros(x_test.shape[0])
        node_list = [self.root]
        d_idx_test = {self.root: np.arange(x_test.shape[0])}
        while True:
            try:
                node_id = node_list.pop(0)
            except IndexError:
                break
            idx_test = d_idx_test[node_id]
            if len(idx_test) == 0:
                continue
            x = x_test[idx_test, :]
            # rate of separation = distance of each test point outside the block's box
            expo_parameter = np.maximum(0, node_id.min_d - x).sum(1) + np.maximum(0, x - node_id.max_d).sum(1)
            prob_not_separated_now = np.exp(-expo_parameter * self.max_split_costs[node_id])
            prob_separated_now = 1 - prob_not_separated_now
            if math.isinf(self.max_split_costs[node_id]):
                # rare scenario where test point overlaps exactly with a training data point
                idx_zero = expo_parameter == 0
                # to prevent nan in computation above when test point overlaps with training data point
                prob_not_separated_now[idx_zero] = 1.
                prob_separated_now[idx_zero] = 0.
                # predictions for idx_test_zero
            idx_non_zero = expo_parameter > 0
            idx_test_non_zero = idx_test[idx_non_zero]
            n_test_non_zero = len(idx_test_non_zero)
            expo_parameter_non_zero = expo_parameter[idx_non_zero]
            if np.any(idx_non_zero):
                # expected variance (averaging over time of cut which is a truncated exponential)
                # NOTE: expected variance is approximate since E[f(x)] not equal to f(E[x])
                expected_cut_time = 1.0 / expo_parameter_non_zero
                if not np.isinf(self.max_split_costs[node_id]):
                    # correction term for truncation of the exponential at max_split_costs
                    tmp_exp_term_arg = -self.max_split_costs[node_id] * expo_parameter_non_zero
                    tmp_exp_term = np.exp(tmp_exp_term_arg)
                    expected_cut_time -= self.max_split_costs[node_id] * tmp_exp_term / (-np.expm1(tmp_exp_term_arg))
                try:
                    assert np.all(expected_cut_time >= 0.)
                except AssertionError:
                    print tmp_exp_term_arg
                    print tmp_exp_term
                    print expected_cut_time
                    print np.any(np.isnan(expected_cut_time))
                    print 1.0 / expo_parameter_non_zero
                    raise AssertionError
                if not settings.smooth_hierarchically:
                    # plain empirical moments at this node
                    pred_mean_tmp = self.sum_y[node_id] / float(self.n_points[node_id])
                    pred_second_moment_tmp = self.sum_y2[node_id] / float(self.n_points[node_id]) + param.noise_variance
                else:
                    pred_mean_tmp, pred_second_moment_tmp = self.pred_moments[node_id]
                # FIXME: approximate since E[f(x)] not equal to f(E[x])
                expected_split_time = expected_cut_time + self.get_parent_split_time(node_id, settings)
                variance_from_mean = self.variance_coef * (sigmoid(self.sigmoid_coef * expected_split_time) \
                                        - sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
                pred_second_moment_tmp += variance_from_mean
                pred_variance_tmp = pred_second_moment_tmp - pred_mean_tmp ** 2
                pred_sample_tmp = pred_mean_tmp + np.random.randn(n_test_non_zero) * np.sqrt(pred_variance_tmp)
                log_pred_prob_tmp = compute_gaussian_logpdf(pred_mean_tmp, pred_variance_tmp, y_test[idx_test_non_zero])
                # weight this node's contribution by the probability of separating here
                prob_separated_now_weighted = \
                        prob_separated_now[idx_non_zero] * prob_not_separated_yet[idx_test_non_zero]
                pred_mean[idx_test_non_zero] += prob_separated_now_weighted * pred_mean_tmp
                pred_sample[idx_test_non_zero] += prob_separated_now_weighted * pred_sample_tmp
                pred_second_moment[idx_test_non_zero] += prob_separated_now_weighted * pred_second_moment_tmp
                log_pred_prob[idx_test_non_zero] = logsumexp_array(log_pred_prob[idx_test_non_zero], \
                                                    np.log(prob_separated_now_weighted) + log_pred_prob_tmp)
            prob_not_separated_yet[idx_test] *= prob_not_separated_now
            # predictions for idx_test_zero
            if math.isinf(self.max_split_costs[node_id]) and np.any(idx_zero):
                idx_test_zero = idx_test[idx_zero]
                n_test_zero = len(idx_test_zero)
                if not settings.smooth_hierarchically:
                    pred_mean_node_id = self.sum_y[node_id] / float(self.n_points[node_id])
                    pred_second_moment_node_id = self.sum_y2[node_id] / float(self.n_points[node_id]) \
                                                    + param.noise_variance
                else:
                    pred_mean_node_id, pred_second_moment_node_id = self.pred_moments[node_id]
                pred_variance_node_id = pred_second_moment_node_id - pred_mean_node_id ** 2
                pred_sample_node_id = pred_mean_node_id + np.random.randn(n_test_zero) * np.sqrt(pred_variance_node_id)
                log_pred_prob_node_id = \
                        compute_gaussian_logpdf(pred_mean_node_id, pred_variance_node_id, y_test[idx_test_zero])
                pred_mean[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_mean_node_id
                pred_sample[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_sample_node_id
                pred_second_moment[idx_test_zero] += prob_not_separated_yet[idx_test_zero] * pred_second_moment_node_id
                log_pred_prob[idx_test_zero] = logsumexp_array(log_pred_prob[idx_test_zero], \
                                                np.log(prob_not_separated_yet[idx_test_zero]) + log_pred_prob_node_id)
            try:
                # route surviving test points to the children; leaves (no
                # node_info entry) end recursion via KeyError
                feat_id, split = self.node_info[node_id]
                cond = x[:, feat_id] <= split
                left, right = get_children_id(node_id)
                d_idx_test[left], d_idx_test[right] = idx_test[cond], idx_test[~cond]
                node_list.append(left)
                node_list.append(right)
            except KeyError:
                pass
        pred_var = pred_second_moment - (pred_mean ** 2)
        if True or settings.debug:   # FIXME: remove later
            assert not np.any(np.isnan(pred_mean))
            assert not np.any(np.isnan(pred_var))
            try:
                assert np.all(pred_var >= 0.)
            except AssertionError:
                min_pred_var = np.min(pred_var)
                print 'min_pred_var = %s' % min_pred_var
                assert np.abs(min_pred_var) < 1e-3     # allowing some numerical errors
            assert not np.any(np.isnan(log_pred_prob))
        return (pred_mean, pred_var, pred_second_moment, log_pred_prob, pred_sample)
def extend_mondrian(self, data, train_ids_new, settings, param, cache):
"""
extend Mondrian tree to include new training data indexed by train_ids_new
"""
self.extend_mondrian_block(self.root, train_ids_new, data, settings, param, cache)
if settings.debug:
print 'completed extend_mondrian'
self.check_tree(settings, data)
    def check_tree(self, settings, data):
        """
        check if tree violates any sanity check

        Verifies structural invariants (parent/child links, extent nesting,
        count additivity, split-time consistency, leaf/non-leaf bookkeeping).
        Raises AssertionError on violation; debug-only diagnostic.
        """
        if settings.debug:
            #print '\nchecking tree'
            print '\nchecking tree: printing tree first'
            self.print_tree(settings)
        # invariants for internal nodes
        for node_id in self.non_leaf_nodes:
            assert node_id.left.parent == node_id.right.parent == node_id
            assert not node_id.is_leaf
            if settings.optype == 'class':
                # an internal node must have seen more than one class
                assert np.count_nonzero(self.counts[node_id]) > 1
            assert not self.pause_mondrian(node_id, settings)
            if node_id != self.root:
                # child extents must be nested inside the parent's extent
                assert np.all(node_id.min_d >= node_id.parent.min_d)
                assert np.all(node_id.max_d <= node_id.parent.max_d)
            if settings.optype == 'class':
                try:
                    # counts at a node must equal the sum over its two children
                    check_if_zero(np.sum(np.abs(self.counts[node_id] - \
                            self.counts[node_id.left] - self.counts[node_id.right])))
                except AssertionError:
                    print 'counts: node = %s, left = %s, right = %s' \
                            % (self.counts[node_id], self.counts[node_id.left], self.counts[node_id.right])
                    raise AssertionError
            if settings.budget == -1:
                assert math.isinf(node_id.budget)
            # split time must decompose as parent split time + max split cost
            check_if_zero(self.split_times[node_id] - self.get_parent_split_time(node_id, settings) \
                    - self.max_split_costs[node_id])
        if settings.optype == 'class':
            num_data_points = 0
        # invariants for leaf nodes
        for node_id in self.leaf_nodes:
            assert node_id.is_leaf
            assert math.isinf(self.max_split_costs[node_id])
            if settings.budget == -1:
                assert math.isinf(node_id.budget)
            if settings.optype == 'class':
                num_data_points += self.counts[node_id].sum()
                # a leaf is either pure or below the minimum-split threshold
                assert (np.count_nonzero(self.counts[node_id]) == 1) or (np.sum(self.counts[node_id]) < settings.min_samples_split)
            assert self.pause_mondrian(node_id, settings)
            if node_id != self.root:
                assert np.all(node_id.min_d >= node_id.parent.min_d)
                assert np.all(node_id.max_d <= node_id.parent.max_d)
        if settings.optype == 'class':
            print 'num_train = %s, number of data points at leaf nodes = %s' % \
                    (data['n_train'], num_data_points)
        # leaf/non-leaf bookkeeping: disjoint, no duplicates, node_info only on internals
        set_non_leaf = set(self.non_leaf_nodes)
        set_leaf = set(self.leaf_nodes)
        assert (set_leaf & set_non_leaf) == set([])
        assert set_non_leaf == set(self.node_info.keys())
        assert len(set_leaf) == len(self.leaf_nodes)
        assert len(set_non_leaf) == len(self.non_leaf_nodes)
    def extend_mondrian_block(self, node_id, train_ids_new, data, settings, param, cache):
        """
        conditional Mondrian algorithm that extends a Mondrian block to include new training data

        Either (a) the new data falls within the block's extended lifetime and
        is recursed into the existing structure ("root form"), or (b) a new cut
        outside the current block's extent is introduced, creating an "outer
        mondrian" block that becomes this node's parent.
        """
        if settings.debug:
            print 'entered extend_mondrian_block'
            print '\nextend_mondrian_block: node_id = %s' % node_id
        if not train_ids_new.size:
            if settings.debug:
                print 'nothing to extend here; train_ids_new = %s' % train_ids_new
            # nothing to extend
            return
        min_d, max_d = get_data_min_max(data, train_ids_new)
        # how far the new data extends beyond the current block's box
        additional_extent_lower = np.maximum(0, node_id.min_d - min_d)
        additional_extent_upper = np.maximum(0, max_d - node_id.max_d)
        expo_parameter = float(additional_extent_lower.sum() + additional_extent_upper.sum())
        if expo_parameter == 0:
            # new data lies entirely within the block: no outside cut possible
            split_cost = np.inf
        else:
            split_cost = random.expovariate(expo_parameter)     # will be updated below in case mondrian is paused
        unpause_paused_mondrian = False
        if settings.debug:
            print 'is_leaf = %s, pause_mondrian = %s, sum_range_d = %s' % \
                    (node_id.is_leaf, self.pause_mondrian(node_id, settings), node_id.sum_range_d)
        if self.pause_mondrian(node_id, settings):
            assert node_id.is_leaf
            split_cost = np.inf
            n_points_new = len(data['y_train'][train_ids_new])
            # FIXME: node_id.sum_range_d not tested
            if settings.optype == 'class':
                y_unique = np.unique(data['y_train'][train_ids_new])
                # unpause only if more than one unique label and number of points >= min_samples_split
                is_pure_leaf = (len(y_unique) == 1) and (self.counts[node_id][y_unique] > 0) \
                        and self.check_if_labels_same(node_id)
                if is_pure_leaf:
                    unpause_paused_mondrian = False
                else:
                    unpause_paused_mondrian = \
                            ((n_points_new + np.sum(self.counts[node_id])) >= settings.min_samples_split)
            else:
                unpause_paused_mondrian = \
                        not( (n_points_new + self.n_points[node_id]) < settings.min_samples_split )
            if settings.debug:
                print 'trying to extend a paused Mondrian; is_leaf = %s, node_id = %s' % (node_id.is_leaf, node_id)
                if settings.optype == 'class':
                    print 'y_unique (new) = %s, n_points_new = %s, counts = %s, split_cost = %s, max_split_costs = %s' % \
                            (y_unique, n_points_new, self.counts[node_id], split_cost, self.max_split_costs[node_id])
                    print 'unpause_paused_mondrian = %s, is_pure_leaf = %s' % (unpause_paused_mondrian, is_pure_leaf)
        if split_cost >= self.max_split_costs[node_id]:
            # take root form of node_id (no cut outside the extent of the current block)
            if not node_id.is_leaf:
                if settings.debug:
                    print 'take root form: non-leaf node'
                feat_id, split = self.node_info[node_id]
                update_range_stats(node_id, (min_d, max_d)) # required here as well
                left, right = node_id.left, node_id.right
                # route the new points to the existing children and recurse
                cond = data['x_train'][train_ids_new, feat_id] <= split
                train_ids_new_left, train_ids_new_right = train_ids_new[cond], train_ids_new[~cond]
                self.add_training_points_to_node(node_id, train_ids_new, data, param, settings, cache, False)
                self.extend_mondrian_block(left, train_ids_new_left, data, settings, param, cache)
                self.extend_mondrian_block(right, train_ids_new_right, data, settings, param, cache)
            else:
                # reached a leaf; add train_ids_new to node_id & update range
                if settings.debug:
                    print 'take root form: leaf node'
                assert node_id.is_leaf
                update_range_stats(node_id, (min_d, max_d))
                self.add_training_points_to_node(node_id, train_ids_new, data, param, settings, cache, True)
                # FIXME: node_id.sum_range_d tested here; perhaps move this to pause_mondrian?
                unpause_paused_mondrian = unpause_paused_mondrian and (node_id.sum_range_d != 0)
                if not self.pause_mondrian(node_id, settings):
                    assert unpause_paused_mondrian
                    # leaf is no longer paused: resume growing from this node
                    self.leaf_nodes.remove(node_id)
                    self.grow_nodes = [node_id]
                    self.grow(data, settings, param, cache)
        else:
            # initialize "outer mondrian"
            if settings.debug:
                print 'trying to introduce a cut outside current block'
            new_block = MondrianBlock(data, settings, node_id.budget, node_id.parent, \
                            get_data_range_from_min_max(np.minimum(min_d, node_id.min_d), np.maximum(max_d, node_id.max_d)))
            init_update_posterior_node_incremental(self, data, param, settings, cache, new_block, \
                    train_ids_new, node_id)     # counts of outer block are initialized with counts of current block
            if node_id.is_leaf:
                warn('\nWARNING: a leaf should not be expanded here; printing out some diagnostics')
                print 'node_id = %s, is_leaf = %s, max_split_cost = %s, split_cost = %s' \
                        % (node_id, node_id.is_leaf, self.max_split_costs[node_id], split_cost)
                print 'counts = %s\nmin_d = \n%s\nmax_d = \n%s' % (self.counts[node_id], node_id.min_d, node_id.max_d)
                raise Exception('a leaf should be expanded via grow call; see diagnostics above')
            if settings.debug:
                print 'looks like cut possible'
            # there is a cut outside the extent of the current block
            # dimension chosen proportional to the additional extent; location
            # uniform within the strip below min_d or above max_d
            feat_score = additional_extent_lower + additional_extent_upper
            feat_id = sample_multinomial_scores(feat_score)
            draw_from_lower = np.random.rand() <= (additional_extent_lower[feat_id] / feat_score[feat_id])
            if draw_from_lower:
                split = random.uniform(min_d[feat_id], node_id.min_d[feat_id])
            else:
                split = random.uniform(node_id.max_d[feat_id], max_d[feat_id])
            assert (split < node_id.min_d[feat_id]) or (split > node_id.max_d[feat_id])
            new_budget = node_id.budget - split_cost
            cond = data['x_train'][train_ids_new, feat_id] <= split
            train_ids_new_left, train_ids_new_right = train_ids_new[cond], train_ids_new[~cond]
            is_left = split > node_id.max_d[feat_id]      # is existing block the left child of "outer mondrian"?
            if is_left:
                train_ids_new_child = train_ids_new_right   # new_child is the other child of "outer mondrian"
            else:
                train_ids_new_child = train_ids_new_left
            # grow the "unconditional mondrian child" of the "outer mondrian"
            new_child = MondrianBlock(data, settings, new_budget, new_block, get_data_range(data, train_ids_new_child))
            if settings.debug:
                print 'new_block = %s' % new_block
                print 'new_child = %s' % new_child
            self.train_ids[new_child] = train_ids_new_child     # required for grow call below
            init_update_posterior_node_incremental(self, data, param, settings, cache, new_child, train_ids_new_child)
            self.node_info[new_block] = (feat_id, split)
            if settings.draw_mondrian:
                train_ids_new_block = np.append(self.train_ids[node_id], train_ids_new)
                self.train_ids[new_block] = train_ids_new_block
            self.non_leaf_nodes.append(new_block)
            new_block.is_leaf = False
            # update budget and call the "conditional mondrian child" of the "outer mondrian"
            node_id.budget = new_budget
            # self.max_split_costs[new_child] will be added in the grow call above
            self.max_split_costs[new_block] = split_cost
            self.split_times[new_block] = split_cost + self.get_parent_split_time(node_id, settings)
            self.max_split_costs[node_id] -= split_cost
            check_if_zero(self.split_times[node_id] - self.split_times[new_block] - self.max_split_costs[node_id])
            # grow the new child of the "outer mondrian"
            self.grow_nodes = [new_child]
            self.grow(data, settings, param, cache)
            # update tree structure and extend "conditional mondrian child" of the "outer mondrian"
            # splice new_block into the tree in place of node_id
            if node_id == self.root:
                self.root = new_block
            else:
                if settings.debug:
                    assert (node_id.parent.left == node_id) or (node_id.parent.right == node_id)
                if node_id.parent.left == node_id:
                    node_id.parent.left = new_block
                else:
                    node_id.parent.right = new_block
            node_id.parent = new_block
            if is_left:
                new_block.left = node_id
                new_block.right = new_child
                self.extend_mondrian_block(node_id, train_ids_new_left, data, settings, param, cache)
            else:
                new_block.left = new_child
                new_block.right = node_id
                self.extend_mondrian_block(node_id, train_ids_new_right, data, settings, param, cache)
def add_training_points_to_node(self, node_id, train_ids_new, data, param, settings, cache, pause_mondrian=False):
"""
add a training data point to a node in the tree
"""
# range updated in extend_mondrian_block
if settings.draw_mondrian or pause_mondrian:
self.train_ids[node_id] = np.append(self.train_ids[node_id], train_ids_new)
update_posterior_node_incremental(self, data, param, settings, cache, node_id, train_ids_new)
def update_posterior_counts(self, param, data, settings):
"""
posterior update for hierarchical normalized stable distribution
using interpolated Kneser Ney smoothing (where number of tables serving a dish at a restaurant is atmost 1)
NOTE: implementation optimized for minibatch training where more than one data point added per minibatch
if only 1 datapoint is added, lots of counts will be unnecesarily updated
"""
self.cnt = {}
node_list = [self.root]
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
if node_id.is_leaf:
cnt = self.counts[node_id]
else:
cnt = np.minimum(self.counts[node_id.left], 1) + np.minimum(self.counts[node_id.right], 1)
node_list.extend([node_id.left, node_id.right])
self.cnt[node_id] = cnt
def update_predictive_posteriors(self, param, data, settings):
"""
update predictive posterior for hierarchical normalized stable distribution
pred_prob computes posterior mean of the label distribution at each node recursively
"""
node_list = [self.root]
if settings.debug:
self.gen_node_ids_print()
while True:
try:
node_id = node_list.pop(0)
except IndexError:
break
base = self.get_prior_mean(node_id, param, settings)
discount = self.get_discount_node_id(node_id, settings)
cnt = self.cnt[node_id]
if not node_id.is_leaf:
self.pred_prob[node_id] = self.compute_posterior_mean_normalized_stable(cnt, discount, base, settings)
node_list.extend([node_id.left, node_id.right])
if settings.debug and False:
print 'node_id = %20s, is_leaf = %5s, discount = %.2f, cnt = %s, base = %s, pred_prob = %s' \
% (self.node_ids_print[node_id], node_id.is_leaf, discount, cnt, base, self.pred_prob[node_id])
check_if_one(np.sum(self.pred_prob[node_id]))
def get_variance_node(self, node_id, param, settings):
# the non-linear transformation should be a monotonically non-decreasing function
# if the function saturates (e.g. sigmoid) children will be closer to parent deeper down the tree
# var = self.variance_coef * (sigmoid(self.sigmoid_coef * self.split_times[node_id]) \
# - sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
var = self.variance_coef * (sigmoid(self.sigmoid_coef * self.split_times[node_id]) \
- sigmoid(self.sigmoid_coef * self.get_parent_split_time(node_id, settings)))
return var
    def update_posterior_gaussians(self, param, data, settings):
        """
        computes marginal gaussian distribution at each node of the tree using gaussian belief propagation
        the solution is exact since underlying graph is a tree
        solution takes O(#nodes) time, which is much more efficient than naive GP implementation which
        would cost O(#nodes^3) time

        Upward pass fills message_to_parent / likelihood_children (leaves to
        root), downward pass fills message_from_parent / pred_param /
        pred_moments (root to leaves).  Gaussians are stored as
        np.array([mean, precision]) pairs combined via multiply_gaussians.
        """
        self.gen_node_list()
        self.message_to_parent = {}
        self.message_from_parent = {}
        self.likelihood_children = {}
        self.pred_param = {}
        self.pred_moments = {}
        # upward pass: node_list is parent-before-child, so reversed order
        # guarantees both children's messages exist before the parent's
        for node_id in self.node_list[::-1]:
            if node_id.is_leaf:
                # use marginal likelihood of data at this leaf
                mean = self.sum_y[node_id] / float(self.n_points[node_id])
                variance = self.get_variance_node(node_id, param, settings) \
                            + self.noise_variance / float(self.n_points[node_id])
                precision = 1.0 / variance
                self.message_to_parent[node_id] = np.array([mean, precision])
                self.likelihood_children[node_id] = np.array([mean, self.noise_precision*float(self.n_points[node_id])])
            else:
                # combine the two children's messages into this node's likelihood
                likelihood_children = multiply_gaussians(self.message_to_parent[node_id.left], \
                        self.message_to_parent[node_id.right])
                mean = likelihood_children[0]
                self.likelihood_children[node_id] = likelihood_children
                variance = self.get_variance_node(node_id, param, settings) + 1.0 / likelihood_children[1]
                precision = 1.0 / variance
                self.message_to_parent[node_id] = np.array([mean, precision])
        variance_at_root = self.get_variance_node(node_id, param, settings)
        self.message_from_parent[self.root] = np.array([param.prior_mean, variance_at_root])
        # downward pass: parent's message is ready before each child is visited
        for node_id in self.node_list:
            # pred_param stores the mean and precision
            self.pred_param[node_id] = multiply_gaussians(self.message_from_parent[node_id], \
                    self.likelihood_children[node_id])
            # pred_moments stores the first and second moments (useful for prediction)
            self.pred_moments[node_id] = np.array([self.pred_param[node_id][0], \
                    1.0 / self.pred_param[node_id][1] + self.pred_param[node_id][0] ** 2 + self.noise_variance])
            if not node_id.is_leaf:
                # each child's downward message excludes its own upward message
                self.message_from_parent[node_id.left] = \
                        multiply_gaussians(self.message_from_parent[node_id], self.message_to_parent[node_id.right])
                self.message_from_parent[node_id.right] = \
                        multiply_gaussians(self.message_from_parent[node_id], self.message_to_parent[node_id.left])
def update_posterior_counts_and_predictive_posteriors(self, param, data, settings):
if settings.optype == 'class':
# update posterior counts
self.update_posterior_counts(param, data, settings)
# update predictive posteriors
self.update_predictive_posteriors(param, data, settings)
else:
# updates hyperparameters in param (common to all trees)
self.update_gaussian_hyperparameters(param, data, settings)
# updates hyperparameters in self (independent for each tree)
# self.update_gaussian_hyperparameters_indep(param, data, settings)
if settings.smooth_hierarchically:
self.update_posterior_gaussians(param, data, settings)
def get_prior_mean(self, node_id, param, settings):
if settings.optype == 'class':
if node_id == self.root:
base = param.base_measure
else:
base = self.pred_prob[node_id.parent]
else:
base = None # for settings.settings.smooth_hierarchically = False
return base
def get_discount_node_id(self, node_id, settings):
"""
compute discount for a node (function of discount_param, time of split and time of split of parent)
"""
discount = math.exp(-settings.discount_param * self.max_split_costs[node_id])
return discount
def compute_posterior_mean_normalized_stable(self, cnt, discount, base, settings):
num_tables_k, num_customers, num_tables = self.get_counts(cnt)
pred_prob = (cnt - discount * num_tables_k + discount * num_tables * base) / num_customers
if settings.debug:
check_if_one(pred_prob.sum())
return pred_prob
def get_counts(self, cnt):
num_tables_k = np.minimum(cnt, 1)
num_customers = float(cnt.sum())
num_tables = float(num_tables_k.sum())
return (num_tables_k, num_customers, num_tables)
def get_data_range(data, train_ids):
    """
    returns min, max, range and linear dimension of training data
    """
    min_d, max_d = get_data_min_max(data, train_ids)
    diff = max_d - min_d
    return (min_d, max_d, diff, float(diff.sum()))
def get_data_min_max(data, train_ids):
    """
    returns min, max of training data
    """
    subset = data['x_train'].take(train_ids, 0)
    return (subset.min(0), subset.max(0))
def get_data_range_from_min_max(min_d, max_d):
    """
    returns (min, max, range, total linear dimension) for the given bounds
    """
    span = max_d - min_d
    return (min_d, max_d, span, float(span.sum()))
def update_range_stats(node_id, (min_d, max_d)):
"""
updates min and max of training data at this block
"""
node_id.min_d = np.minimum(node_id.min_d, min_d)
node_id.max_d = np.maximum(node_id.max_d, max_d)
node_id.range_d = node_id.max_d - node_id.min_d
node_id.sum_range_d = float(node_id.range_d.sum())
def get_children_id(parent):
    """Return the (left, right) child pair of ``parent``."""
    left_child = parent.left
    right_child = parent.right
    return (left_child, right_child)
class MondrianForest(Forest):
    """
    defines Mondrian forest
    variables:
    - forest : stores the Mondrian forest (a list with one MondrianTree per ensemble member)
    methods:
    - fit(data, train_ids_current_minibatch, settings, param, cache) : batch training
    - partial_fit(data, train_ids_current_minibatch, settings, param, cache) : online training
    - evaluate_predictions (see Forest in mondrianforest_utils.py) : predictions
    """
    def __init__(self, settings, data):
        # one slot per ensemble member; trees are created lazily in fit()
        self.forest = [None] * settings.n_mondrians
        if settings.optype == 'class':
            # discount for hierarchical smoothing scales with dimensionality
            settings.discount_param = settings.discount_factor * data['n_dim']
    def fit(self, data, train_ids_current_minibatch, settings, param, cache):
        # batch training: (re)build every tree from scratch on this minibatch
        for i_t, tree in enumerate(self.forest):
            if settings.verbose >= 2 or settings.debug:
                print 'tree_id = %s' % i_t
            tree = self.forest[i_t] = MondrianTree(data, train_ids_current_minibatch, settings, param, cache)
            tree.update_posterior_counts_and_predictive_posteriors(param, data, settings)
    def partial_fit(self, data, train_ids_current_minibatch, settings, param, cache):
        # online training: extend each existing tree with the new minibatch
        for i_t, tree in enumerate(self.forest):
            if settings.verbose >= 2 or settings.debug:
                print 'tree_id = %s' % i_t
            tree.extend_mondrian(data, train_ids_current_minibatch, settings, param, cache)
            tree.update_posterior_counts_and_predictive_posteriors(param, data, settings)
def main():
    """Command-line driver: train a Mondrian forest online over minibatches,
    evaluate it on the test set, print timing/size statistics, and optionally
    pickle results and predictions to disk."""
    time_0 = time.clock()
    settings = process_command_line()
    print
    print '%' * 120
    print 'Beginning mondrianforest.py'
    print 'Current settings:'
    pp.pprint(vars(settings))
    # Resetting random seed
    reset_random_seed(settings)
    # Loading data
    print '\nLoading data ...'
    data = load_data(settings)
    print 'Loading data ... completed'
    print 'Dataset name = %s' % settings.dataset
    print 'Characteristics of the dataset:'
    print 'n_train = %d, n_test = %d, n_dim = %d' %\
            (data['n_train'], data['n_test'], data['n_dim'])
    if settings.optype == 'class':
        print 'n_class = %d' % (data['n_class'])
    # precomputation
    param, cache = precompute_minimal(data, settings)
    time_init = time.clock() - time_0
    print '\nCreating Mondrian forest'
    # online training with minibatches
    time_method_sans_init = 0.
    time_prediction = 0.
    mf = MondrianForest(settings, data)
    if settings.store_every:
        # per-minibatch history buffers (only kept when store_every is on)
        log_prob_test_minibatch = -np.inf * np.ones(settings.n_minibatches)
        log_prob_train_minibatch = -np.inf * np.ones(settings.n_minibatches)
        metric_test_minibatch = -np.inf * np.ones(settings.n_minibatches)
        metric_train_minibatch = -np.inf * np.ones(settings.n_minibatches)
        time_method_minibatch = np.inf * np.ones(settings.n_minibatches)
        forest_numleaves_minibatch = np.zeros(settings.n_minibatches)
    for idx_minibatch in range(settings.n_minibatches):
        time_method_init = time.clock()
        is_last_minibatch = (idx_minibatch == settings.n_minibatches - 1)
        print_results = is_last_minibatch or (settings.verbose >= 2) or settings.debug
        if print_results:
            print '*' * 120
            print 'idx_minibatch = %5d' % idx_minibatch
        train_ids_current_minibatch = data['train_ids_partition']['current'][idx_minibatch]
        if settings.debug:
            print 'bagging = %s, train_ids_current_minibatch = %s' % \
                    (settings.bagging, train_ids_current_minibatch)
        # first minibatch trains from scratch, the rest extend the trees
        if idx_minibatch == 0:
            mf.fit(data, train_ids_current_minibatch, settings, param, cache)
        else:
            mf.partial_fit(data, train_ids_current_minibatch, settings, param, cache)
        for i_t, tree in enumerate(mf.forest):
            if settings.debug or settings.verbose >= 2:
                print '-'*100
                tree.print_tree(settings)
                print '.'*100
            if settings.draw_mondrian:
                tree.draw_mondrian(data, settings, idx_minibatch, i_t)
                if settings.save == 1:
                    filename_plot = get_filename_mf(settings)[:-2]
                    if settings.store_every:
                        plt.savefig(filename_plot + '-mondrians_minibatch-' + str(idx_minibatch) + '.pdf', format='pdf')
        time_method_sans_init += time.clock() - time_method_init
        time_method = time_method_sans_init + time_init
        # Evaluate
        if is_last_minibatch or settings.store_every:
            time_predictions_init = time.clock()
            weights_prediction = np.ones(settings.n_mondrians) * 1.0 / settings.n_mondrians
            if False:
                if print_results:
                    print 'Results on training data (log predictive prob is bogus)'
                train_ids_cumulative = data['train_ids_partition']['cumulative'][idx_minibatch]
                # NOTE: some of these data points are not used for "training" if bagging is used
                pred_forest_train, metrics_train = \
                    mf.evaluate_predictions(data, data['x_train'][train_ids_cumulative, :], \
                    data['y_train'][train_ids_cumulative], \
                    settings, param, weights_prediction, print_results)
            else:
                # not computing metrics on training data
                metrics_train = {'log_prob': -np.inf, 'acc': 0, 'mse': np.inf}
                pred_forest_train = None
            if print_results:
                print '\nResults on test data'
            pred_forest_test, metrics_test = \
                mf.evaluate_predictions(data, data['x_test'], data['y_test'], \
                settings, param, weights_prediction, print_results)
            name_metric = settings.name_metric # acc or mse
            log_prob_train = metrics_train['log_prob']
            log_prob_test = metrics_test['log_prob']
            metric_train = metrics_train[name_metric]
            metric_test = metrics_test[name_metric]
            if settings.store_every:
                log_prob_train_minibatch[idx_minibatch] = metrics_train['log_prob']
                log_prob_test_minibatch[idx_minibatch] = metrics_test['log_prob']
                metric_train_minibatch[idx_minibatch] = metrics_train[name_metric]
                metric_test_minibatch[idx_minibatch] = metrics_test[name_metric]
                time_method_minibatch[idx_minibatch] = time_method
                tree_numleaves = np.zeros(settings.n_mondrians)
                for i_t, tree in enumerate(mf.forest):
                    tree_numleaves[i_t] = len(tree.leaf_nodes)
                forest_numleaves_minibatch[idx_minibatch] = np.mean(tree_numleaves)
            time_prediction += time.clock() - time_predictions_init
    # printing test performance:
    if settings.store_every:
        print 'printing test performance for every minibatch:'
        print 'idx_minibatch\tmetric_test\ttime_method\tnum_leaves'
        for idx_minibatch in range(settings.n_minibatches):
            print '%10d\t%.3f\t\t%.3f\t\t%.1f' % \
                    (idx_minibatch, \
                    metric_test_minibatch[idx_minibatch], \
                    time_method_minibatch[idx_minibatch], forest_numleaves_minibatch[idx_minibatch])
    print '\nFinal forest stats:'
    tree_stats = np.zeros((settings.n_mondrians, 2))
    tree_average_depth = np.zeros(settings.n_mondrians)
    for i_t, tree in enumerate(mf.forest):
        tree_stats[i_t, -2:] = np.array([len(tree.leaf_nodes), len(tree.non_leaf_nodes)])
        tree_average_depth[i_t] = tree.get_average_depth(settings, data)[0]
    print 'mean(num_leaves) = %.1f, mean(num_non_leaves) = %.1f, mean(tree_average_depth) = %.1f' \
            % (np.mean(tree_stats[:, -2]), np.mean(tree_stats[:, -1]), np.mean(tree_average_depth))
    print 'n_train = %d, log_2(n_train) = %.1f, mean(tree_average_depth) = %.1f +- %.1f' \
            % (data['n_train'], np.log2(data['n_train']), np.mean(tree_average_depth), np.std(tree_average_depth))
    if settings.draw_mondrian:
        if settings.save == 1:
            plt.savefig(filename_plot + '-mondrians-final.pdf', format='pdf')
        else:
            plt.show()
    # Write results to disk (timing doesn't include saving)
    time_total = time.clock() - time_0
    # resetting
    if settings.save == 1:
        filename = get_filename_mf(settings)
        print 'filename = ' + filename
        results = {'log_prob_test': log_prob_test, 'log_prob_train': log_prob_train, \
                'metric_test': metric_test, 'metric_train': metric_train, \
                'time_total': time_total, 'time_method': time_method, \
                'time_init': time_init, 'time_method_sans_init': time_method_sans_init,\
                'time_prediction': time_prediction}
        if 'log_prob2' in metrics_test:
            results['log_prob2_test'] = metrics_test['log_prob2']
        store_data = settings.dataset[:3] == 'toy' or settings.dataset == 'sim-reg'
        if store_data:
            results['data'] = data
        if settings.store_every:
            results['log_prob_test_minibatch'] = log_prob_test_minibatch
            results['log_prob_train_minibatch'] = log_prob_train_minibatch
            results['metric_test_minibatch'] = metric_test_minibatch
            results['metric_train_minibatch'] = metric_train_minibatch
            results['time_method_minibatch'] = time_method_minibatch
            results['forest_numleaves_minibatch'] = forest_numleaves_minibatch
        results['settings'] = settings
        results['tree_stats'] = tree_stats
        results['tree_average_depth'] = tree_average_depth
        pickle.dump(results, open(filename, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
        # storing final predictions as well; recreate new "results" dict
        results = {'pred_forest_train': pred_forest_train, \
                'pred_forest_test': pred_forest_test}
        filename2 = filename[:-2] + '.tree_predictions.p'
        pickle.dump(results, open(filename2, "wb"), protocol=pickle.HIGHEST_PROTOCOL)
    time_total = time.clock() - time_0
    print
    print 'Time for initializing Mondrian forest (seconds) = %f' % (time_init)
    print 'Time for executing mondrianforest.py (seconds) = %f' % (time_method_sans_init)
    print 'Total time for executing mondrianforest.py, including init (seconds) = %f' % (time_method)
    print 'Time for prediction/evaluation (seconds) = %f' % (time_prediction)
    print 'Total time (Loading data/ initializing / running / predictions / saving) (seconds) = %f\n' % (time_total)
# Script entry point.
if __name__ == "__main__":
    main()
| mit |
eXistenZNL/SickRage | lib/sqlalchemy/log.py | 79 | 6666 | # sqlalchemy/log.py
# Copyright (C) 2006-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logging control and utilities.
Control of logging for SA can be performed from the regular python logging
module. The regular dotted module namespace is used, starting at
'sqlalchemy'. For class-level logging, the class name is appended.
The "echo" keyword parameter, available on SQLA :class:`.Engine`
and :class:`.Pool` objects, corresponds to a logger specific to that
instance only.
"""
import logging
import sys
# Set the initial level to WARN.  This is so that log statements don't
# occur in the absence of explicit logging being enabled for 'sqlalchemy'.
rootlogger = logging.getLogger('sqlalchemy')
if rootlogger.level == logging.NOTSET:
    rootlogger.setLevel(logging.WARN)
def _add_default_handler(logger):
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
logger.addHandler(handler)
# Registry of every class decorated with class_logger().
_logged_classes = set()


def class_logger(cls):
    """Decorate *cls* with a module-scoped logger and level predicates.

    Adds ``cls.logger`` plus ``_should_log_debug``/``_should_log_info``
    methods that report whether the logger currently emits at that level,
    and records the class in the module registry.
    """
    log = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
    cls._should_log_debug = lambda self: log.isEnabledFor(logging.DEBUG)
    cls._should_log_info = lambda self: log.isEnabledFor(logging.INFO)
    cls.logger = log
    _logged_classes.add(cls)
    return cls
class Identified(object):
    """Mixin for objects that carry a ``logger`` and an optional name
    (``logging_name``) used to build a per-instance logger namespace."""

    logging_name = None

    def _should_log_debug(self):
        """Return True when this object's logger emits DEBUG records."""
        return self.logger.isEnabledFor(logging.DEBUG)

    def _should_log_info(self):
        """Return True when this object's logger emits INFO records."""
        return self.logger.isEnabledFor(logging.INFO)
class InstanceLogger(object):
    """A logger adapter (wrapper) for :class:`.Identified` subclasses.

    Lets multiple instances (e.g. Engine or Pool objects) share one
    underlying logger while controlling verbosity per instance via the
    ``echo`` flag.

    The echo flag maps to an effective level:

        'debug' -> logging.DEBUG
        True    -> logging.INFO
        False   -> effective level of the underlying logger
                   (logging.WARNING by default)
        None    -> same as False
    """

    # Translation of echo settings into logging levels.
    _echo_map = {
        None: logging.NOTSET,
        False: logging.NOTSET,
        True: logging.INFO,
        'debug': logging.DEBUG,
    }

    def __init__(self, echo, name):
        self.echo = echo
        self.logger = logging.getLogger(name)

        # An enabled echo flag on a handler-less logger would lose output;
        # install the default stdout handler in that case.
        if self._echo_map[echo] <= logging.INFO and not self.logger.handlers:
            _add_default_handler(self.logger)

    #
    # Boilerplate convenience methods
    #

    def debug(self, msg, *args, **kwargs):
        """Delegate a debug call to the underlying logger."""
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """Delegate an info call to the underlying logger."""
        self.log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """Delegate a warning call to the underlying logger."""
        self.log(logging.WARNING, msg, *args, **kwargs)

    warn = warning

    def error(self, msg, *args, **kwargs):
        """Delegate an error call to the underlying logger."""
        self.log(logging.ERROR, msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        """Delegate an exception call to the underlying logger."""
        kwargs["exc_info"] = 1
        self.log(logging.ERROR, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """Delegate a critical call to the underlying logger."""
        self.log(logging.CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """Delegate a log call to the underlying logger.

        The gate level combines the echo flag with the underlying
        logger's configuration; ``logger._log()`` is invoked directly,
        inlining the isEnabledFor()/getEffectiveLevel() logic to avoid
        their overhead.
        """
        if self.logger.manager.disable >= level:
            return

        gate = self._echo_map[self.echo]
        if gate == logging.NOTSET:
            gate = self.logger.getEffectiveLevel()

        if level >= gate:
            self.logger._log(level, msg, args, **kwargs)

    def isEnabledFor(self, level):
        """Is this logger enabled for level 'level'?"""
        if self.logger.manager.disable >= level:
            return False
        return level >= self.getEffectiveLevel()

    def getEffectiveLevel(self):
        """What's the effective level for this logger?"""
        configured = self._echo_map[self.echo]
        if configured == logging.NOTSET:
            configured = self.logger.getEffectiveLevel()
        return configured
def instance_logger(instance, echoflag=None):
    """create a logger for an instance that implements :class:`.Identified`."""
    cls = instance.__class__
    if instance.logging_name:
        name = "%s.%s.%s" % (cls.__module__, cls.__name__, instance.logging_name)
    else:
        name = "%s.%s" % (cls.__module__, cls.__name__)

    instance._echo = echoflag

    if echoflag in (False, None):
        # no echo setting (or False): hand back a plain Logger and skip
        # the overhead of per-call filtering
        logger = logging.getLogger(name)
    else:
        # an explicit echo flag: wrap in an InstanceLogger, which checks
        # the flag and overrides normal levels by calling logger._log()
        logger = InstanceLogger(echoflag, name)

    instance.logger = logger
class echo_property(object):
    __doc__ = """\
    When ``True``, enable log output for this element.

    This has the effect of setting the Python logging level for the namespace
    of this element's class and object reference. A value of boolean ``True``
    indicates that the loglevel ``logging.INFO`` will be set for the logger,
    whereas the string value ``debug`` will set the loglevel to
    ``logging.DEBUG``.
    """

    def __get__(self, instance, owner):
        # class-level access returns the descriptor itself
        if instance is None:
            return self
        return instance._echo

    def __set__(self, instance, value):
        # rebuild the instance's logger to reflect the new echo flag
        instance_logger(instance, echoflag=value)
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/260_test_select.py | 56 | 1711 | from test import test_support
import unittest
import select
import os
import sys
class SelectTestCase(unittest.TestCase):
    """Tests for select.select(): argument validation and polling a pipe."""
    class Nope:
        # no fileno() at all -- select() must reject it
        pass
    class Almost:
        # fileno() exists but returns a non-integer -- also rejected
        def fileno(self):
            return 'fileno'
    def test_error_conditions(self):
        # non-sequence arguments, objects without a usable fileno(), and a
        # non-numeric timeout must all raise TypeError
        self.assertRaises(TypeError, select.select, 1, 2, 3)
        self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
        self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
        self.assertRaises(TypeError, select.select, [], [], [], "not a number")
    def test_select(self):
        # platforms without a POSIX select() over pipes are skipped
        if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
            if test_support.verbose:
                print "Can't test select easily on", sys.platform
            return
        # shell child emits one line per second; poll its pipe with varying timeouts
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        p = os.popen(cmd, 'r')
        for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
            if test_support.verbose:
                print 'timeout =', tout
            rfd, wfd, xfd = select.select([p], [], [], tout)
            if (rfd, wfd, xfd) == ([], [], []):
                continue
            if (rfd, wfd, xfd) == ([p], [], []):
                line = p.readline()
                if test_support.verbose:
                    print repr(line)
                if not line:
                    if test_support.verbose:
                        print 'EOF'
                    break
                continue
            self.fail('Unexpected return values from select():', rfd, wfd, xfd)
        p.close()
def test_main():
    # run the suite, then clean up the shell child spawned by test_select
    test_support.run_unittest(SelectTestCase)
    test_support.reap_children()
# Script entry point.
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
appliedx/edx-platform | lms/djangoapps/courseware/tests/test_video_mongo.py | 64 | 47519 | # -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
import ddt
import itertools
import json
from collections import OrderedDict
from lxml import etree
from mock import patch, MagicMock, Mock
from nose.plugins.attrib import attr
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from xmodule.video_module import VideoDescriptor, bumper_utils, video_utils
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_video import VideoDescriptorTestBase, instantiate_descriptor
from xmodule.tests.test_import import DummySystem
from edxval.api import (
create_profile, create_video, get_video_info, ValCannotCreateError, ValVideoNotFoundError
)
from . import BaseTestXmodule
from .test_video_xml import SOURCE_XML
from .test_video_handlers import TestVideo
@attr('shard_1')
class TestVideoYouTube(TestVideo):
    """Render the video module from YouTube-based XML and verify the exact
    template context handed to video.html."""
    METADATA = {}
    def test_video_constructor(self):
        """Make sure that all parameters extracted correctly from xml"""
        context = self.item_descriptor.render(STUDENT_VIEW).content
        sources = [u'example.mp4', u'example.webm']
        # 'metadata' is the JSON blob consumed by the front-end video player
        expected_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': 'null',
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'metadata': json.dumps(OrderedDict({
                "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
                "autoplay": False,
                "streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
                "sub": "a_sub_file.srt.sjson",
                "sources": sources,
                "captionDataDir": None,
                "showCaptions": "true",
                "generalSpeed": 1.0,
                "speed": None,
                "savedVideoPosition": 0.0,
                "start": 3603.0,
                "end": 3610.0,
                "transcriptLanguage": "en",
                "transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
                "ytTestTimeout": 1500,
                "ytApiUrl": "https://www.youtube.com/iframe_api",
                "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
                "ytKey": None,
                "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'translation/__lang__'
                ).rstrip('/?'),
                "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'available_translations'
                ).rstrip('/?'),
                "autohideHtml5": False,
            })),
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            'poster': 'null',
        }
        self.assertEqual(
            context,
            self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
        )
@attr('shard_1')
class TestVideoNonYouTube(TestVideo):
    """Integration tests: web client + mongo."""
    DATA = """
        <video show_captions="true"
        display_name="A Name"
        sub="a_sub_file.srt.sjson"
        download_video="true"
        start_time="01:00:03" end_time="01:00:10"
        >
            <source src="example.mp4"/>
            <source src="example.webm"/>
        </video>
    """
    MODEL_DATA = {
        'data': DATA,
    }
    METADATA = {}
    def test_video_constructor(self):
        """Make sure that if the 'youtube' attribute is omitted in XML, then
        the template generates an empty string for the YouTube streams.
        """
        context = self.item_descriptor.render(STUDENT_VIEW).content
        sources = [u'example.mp4', u'example.webm']
        # with no 'youtube' attribute only the default 1.00-speed stream appears
        expected_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': 'null',
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'metadata': json.dumps(OrderedDict({
                "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
                "autoplay": False,
                "streams": "1.00:3_yD_cEKoCk",
                "sub": "a_sub_file.srt.sjson",
                "sources": sources,
                "captionDataDir": None,
                "showCaptions": "true",
                "generalSpeed": 1.0,
                "speed": None,
                "savedVideoPosition": 0.0,
                "start": 3603.0,
                "end": 3610.0,
                "transcriptLanguage": "en",
                "transcriptLanguages": OrderedDict({"en": "English"}),
                "ytTestTimeout": 1500,
                "ytApiUrl": "https://www.youtube.com/iframe_api",
                "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
                "ytKey": None,
                "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'translation/__lang__'
                ).rstrip('/?'),
                "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'available_translations'
                ).rstrip('/?'),
                "autohideHtml5": False,
            })),
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            'poster': 'null',
        }
        self.assertEqual(
            context,
            self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
        )
@attr('shard_1')
class TestGetHtmlMethod(BaseTestXmodule):
'''
Make sure that `get_html` works correctly.
'''
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
    def setUp(self):
        """Create the course and the baseline player-metadata dict shared by
        the tests; individual tests override only the keys they exercise."""
        super(TestGetHtmlMethod, self).setUp()
        self.setup_course()
        self.default_metadata_dict = OrderedDict({
            "saveStateUrl": "",
            "autoplay": settings.FEATURES.get('AUTOPLAY_VIDEOS', True),
            "streams": "1.00:3_yD_cEKoCk",
            "sub": "a_sub_file.srt.sjson",
            "sources": '[]',
            "captionDataDir": None,
            "showCaptions": "true",
            "generalSpeed": 1.0,
            "speed": None,
            "savedVideoPosition": 0.0,
            "start": 3603.0,
            "end": 3610.0,
            "transcriptLanguage": "en",
            "transcriptLanguages": OrderedDict({"en": "English"}),
            "ytTestTimeout": 1500,
            "ytApiUrl": "https://www.youtube.com/iframe_api",
            "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
            "ytKey": None,
            "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'translation/__lang__'
            ).rstrip('/?'),
            "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'available_translations'
            ).rstrip('/?'),
            "autohideHtml5": False,
        })
    def test_get_html_track(self):
        """Check how the rendered track URL and transcript format depend on the
        combinations of download_track / track / sub / extra transcripts."""
        # NOTE(review): this local SOURCE_XML shadows the module-level
        # SOURCE_XML imported from .test_video_xml
        SOURCE_XML = """
            <video show_captions="true"
            display_name="A Name"
                sub="{sub}" download_track="{download_track}"
            start_time="01:00:03" end_time="01:00:10" download_video="true"
            >
                <source src="example.mp4"/>
                <source src="example.webm"/>
                {track}
                {transcripts}
            </video>
        """
        cases = [
            {
                'download_track': u'true',
                'track': u'<track src="http://www.example.com/track"/>',
                'sub': u'a_sub_file.srt.sjson',
                'expected_track_url': u'http://www.example.com/track',
                'transcripts': '',
            },
            {
                'download_track': u'true',
                'track': u'',
                'sub': u'a_sub_file.srt.sjson',
                'expected_track_url': u'a_sub_file.srt.sjson',
                'transcripts': '',
            },
            {
                'download_track': u'true',
                'track': u'',
                'sub': u'',
                'expected_track_url': None,
                'transcripts': '',
            },
            {
                'download_track': u'false',
                'track': u'<track src="http://www.example.com/track"/>',
                'sub': u'a_sub_file.srt.sjson',
                'expected_track_url': None,
                'transcripts': '',
            },
            {
                'download_track': u'true',
                'track': u'',
                'sub': u'',
                'expected_track_url': u'a_sub_file.srt.sjson',
                'transcripts': '<transcript language="uk" src="ukrainian.srt" />',
            },
        ]
        sources = [u'example.mp4', u'example.webm']
        expected_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': 'null',
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'metadata': '',
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            'poster': 'null',
        }
        for data in cases:
            # re-render the module for each case and patch the expectations
            metadata = self.default_metadata_dict
            metadata['sources'] = sources
            DATA = SOURCE_XML.format(
                download_track=data['download_track'],
                track=data['track'],
                sub=data['sub'],
                transcripts=data['transcripts'],
            )
            self.initialize_module(data=DATA)
            track_url = self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'download'
            ).rstrip('/?')
            context = self.item_descriptor.render(STUDENT_VIEW).content
            metadata.update({
                'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": u'Українська'},
                'transcriptLanguage': u'en' if not data['transcripts'] or data.get('sub') else u'uk',
                'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'translation/__lang__'
                ).rstrip('/?'),
                'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'available_translations'
                ).rstrip('/?'),
                'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
                'sub': data['sub'],
            })
            expected_context.update({
                'transcript_download_format': (
                    None if self.item_descriptor.track and self.item_descriptor.download_track else 'srt'
                ),
                'track': (
                    track_url if data['expected_track_url'] == u'a_sub_file.srt.sjson' else data['expected_track_url']
                ),
                'id': self.item_descriptor.location.html_id(),
                'metadata': json.dumps(metadata)
            })
            self.assertEqual(
                context,
                self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),
            )
    def test_get_html_source(self):
        """Check how download_video_link and the sources list depend on the
        combinations of download_video / source / <source> children."""
        # NOTE(review): this local SOURCE_XML shadows the module-level
        # SOURCE_XML imported from .test_video_xml
        SOURCE_XML = """
            <video show_captions="true"
            display_name="A Name"
                sub="a_sub_file.srt.sjson" source="{source}"
            download_video="{download_video}"
            start_time="01:00:03" end_time="01:00:10"
            >
                {sources}
            </video>
        """
        cases = [
            # self.download_video == True
            {
                'download_video': 'true',
                'source': 'example_source.mp4',
                'sources': """
                    <source src="example.mp4"/>
                    <source src="example.webm"/>
                """,
                'result': {
                    'download_video_link': u'example_source.mp4',
                    'sources': [u'example.mp4', u'example.webm'],
                },
            },
            {
                'download_video': 'true',
                'source': '',
                'sources': """
                    <source src="example.mp4"/>
                    <source src="example.webm"/>
                """,
                'result': {
                    'download_video_link': u'example.mp4',
                    'sources': [u'example.mp4', u'example.webm'],
                },
            },
            {
                'download_video': 'true',
                'source': '',
                'sources': [],
                'result': {},
            },
            # self.download_video == False
            {
                'download_video': 'false',
                'source': 'example_source.mp4',
                'sources': """
                    <source src="example.mp4"/>
                    <source src="example.webm"/>
                """,
                'result': {
                    'sources': [u'example.mp4', u'example.webm'],
                },
            },
        ]
        initial_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': 'null',
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'metadata': self.default_metadata_dict,
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            'poster': 'null',
        }
        for data in cases:
            # re-render the module for each case and patch the expectations
            DATA = SOURCE_XML.format(
                download_video=data['download_video'],
                source=data['source'],
                sources=data['sources']
            )
            self.initialize_module(data=DATA)
            context = self.item_descriptor.render(STUDENT_VIEW).content
            expected_context = dict(initial_context)
            expected_context['metadata'].update({
                'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'translation/__lang__'
                ).rstrip('/?'),
                'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'available_translations'
                ).rstrip('/?'),
                'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
                'sources': data['result'].get('sources', []),
            })
            expected_context.update({
                'id': self.item_descriptor.location.html_id(),
                'download_video_link': data['result'].get('download_video_link'),
                'metadata': json.dumps(expected_context['metadata'])
            })
            self.assertEqual(
                context,
                self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
            )
    def test_get_html_with_non_existent_edx_video_id(self):
        """
        Tests the VideoModule get_html where a edx_video_id is given but a video is not found
        """
        # NOTE(review): this local SOURCE_XML shadows the module-level
        # SOURCE_XML imported from .test_video_xml
        SOURCE_XML = """
            <video show_captions="true"
            display_name="A Name"
                sub="a_sub_file.srt.sjson" source="{source}"
            download_video="{download_video}"
            start_time="01:00:03" end_time="01:00:10"
            edx_video_id="{edx_video_id}"
            >
                {sources}
            </video>
        """
        # "meow" is not registered in VAL, so the fallback path is exercised
        no_video_data = {
            'download_video': 'true',
            'source': 'example_source.mp4',
            'sources': """
                <source src="example.mp4"/>
                <source src="example.webm"/>
            """,
            'edx_video_id': "meow",
            'result': {
                'download_video_link': u'example_source.mp4',
                'sources': [u'example.mp4', u'example.webm'],
            }
        }
        DATA = SOURCE_XML.format(
            download_video=no_video_data['download_video'],
            source=no_video_data['source'],
            sources=no_video_data['sources'],
            edx_video_id=no_video_data['edx_video_id']
        )
        self.initialize_module(data=DATA)
        # Referencing a non-existent VAL ID in courseware won't cause an error --
        # it'll just fall back to the values in the VideoDescriptor.
        self.assertIn("example_source.mp4", self.item_descriptor.render(STUDENT_VIEW).content)
    @patch('edxval.api.get_video_info')
    def test_get_html_with_mocked_edx_video_id(self, mock_get_video_info):
        """With a mocked VAL lookup, the encoded-video URL is appended to the
        sources and download_video=false leaves download_video_link unset."""
        mock_get_video_info.return_value = {
            'url': '/edxval/video/example',
            'edx_video_id': u'example',
            'duration': 111.0,
            'client_video_id': u'The example video',
            'encoded_videos': [
                {
                    'url': u'http://www.meowmix.com',
                    'file_size': 25556,
                    'bitrate': 9600,
                    'profile': u'desktop_mp4'
                }
            ]
        }
        # NOTE(review): this local SOURCE_XML shadows the module-level
        # SOURCE_XML imported from .test_video_xml
        SOURCE_XML = """
            <video show_captions="true"
            display_name="A Name"
                sub="a_sub_file.srt.sjson" source="{source}"
            download_video="{download_video}"
            start_time="01:00:03" end_time="01:00:10"
            edx_video_id="{edx_video_id}"
            >
                {sources}
            </video>
        """
        data = {
            # test with download_video set to false and make sure download_video_link is not set (is None)
            'download_video': 'false',
            'source': 'example_source.mp4',
            'sources': """
                <source src="example.mp4"/>
                <source src="example.webm"/>
            """,
            'edx_video_id': "mock item",
            'result': {
                'download_video_link': None,
                # make sure the desktop_mp4 url is included as part of the alternative sources.
                'sources': [u'example.mp4', u'example.webm', u'http://www.meowmix.com'],
            }
        }
        # Video found for edx_video_id
        metadata = self.default_metadata_dict
        metadata['autoplay'] = False
        metadata['sources'] = ""
        initial_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': 'null',
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            'poster': 'null',
            'metadata': metadata
        }
        DATA = SOURCE_XML.format(
            download_video=data['download_video'],
            source=data['source'],
            sources=data['sources'],
            edx_video_id=data['edx_video_id']
        )
        self.initialize_module(data=DATA)
        context = self.item_descriptor.render(STUDENT_VIEW).content
        expected_context = dict(initial_context)
        expected_context['metadata'].update({
            'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'translation/__lang__'
            ).rstrip('/?'),
            'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'available_translations'
            ).rstrip('/?'),
            'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
            'sources': data['result']['sources'],
        })
        expected_context.update({
            'id': self.item_descriptor.location.html_id(),
            'download_video_link': data['result']['download_video_link'],
            'metadata': json.dumps(expected_context['metadata'])
        })
        self.assertEqual(
            context,
            self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
        )
def test_get_html_with_existing_edx_video_id(self):
    """
    Rendering when the module's edx_video_id exists in VAL.

    Registers two encoded videos (desktop_webm / desktop_mp4) in VAL for
    the "thundercats" video, renders the student view, and checks that
    the template context carries both the raw html5 <source> urls and the
    VAL encoding urls, with the download link taken from the VAL mp4
    encoding.
    """
    # create test profiles and their encodings
    encoded_videos = []
    for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]:
        create_profile(profile)
        encoded_videos.append(
            dict(
                url=u"http://fake-video.edx.org/thundercats.{}".format(extension),
                file_size=9000,
                bitrate=42,
                profile=profile,
            )
        )
    result = create_video(
        dict(
            client_video_id="Thunder Cats",
            duration=111,
            edx_video_id="thundercats",
            status='test',
            encoded_videos=encoded_videos
        )
    )
    # create_video returns the id the video was stored under.
    self.assertEqual(result, "thundercats")
    # XML template for the module under test; placeholders are filled
    # from `data` below.
    SOURCE_XML = """
        <video show_captions="true"
        display_name="A Name"
        sub="a_sub_file.srt.sjson" source="{source}"
        download_video="{download_video}"
        start_time="01:00:03" end_time="01:00:10"
        edx_video_id="{edx_video_id}"
        >
        {sources}
        </video>
    """
    data = {
        'download_video': 'true',
        'source': 'example_source.mp4',
        'sources': """
            <source src="example.mp4"/>
            <source src="example.webm"/>
        """,
        'edx_video_id': "thundercats",
        'result': {
            'download_video_link': u'http://fake-video.edx.org/thundercats.mp4',
            # make sure the urls for the various encodings are included as part of the alternative sources.
            'sources': [u'example.mp4', u'example.webm'] +
                       [video['url'] for video in encoded_videos],
        }
    }
    # Video found for edx_video_id
    # NOTE(review): this mutates the object returned by
    # `self.default_metadata_dict` in place — assumes that attribute hands
    # back a fresh dict per access; confirm, otherwise state leaks between
    # tests in this class.
    metadata = self.default_metadata_dict
    metadata['sources'] = ""
    # Expected template context, minus the handler urls which can only be
    # computed after the module is (re)initialized below.
    initial_context = {
        'branding_info': None,
        'license': None,
        'bumper_metadata': 'null',
        'cdn_eval': False,
        'cdn_exp_group': None,
        'display_name': u'A Name',
        'download_video_link': u'example.mp4',
        'handout': None,
        'id': self.item_descriptor.location.html_id(),
        'track': None,
        'transcript_download_format': 'srt',
        'transcript_download_formats_list': [
            {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
            {'display_name': 'Text (.txt) file', 'value': 'txt'}
        ],
        'poster': 'null',
        'metadata': metadata,
    }
    DATA = SOURCE_XML.format(
        download_video=data['download_video'],
        source=data['source'],
        sources=data['sources'],
        edx_video_id=data['edx_video_id']
    )
    self.initialize_module(data=DATA)
    context = self.item_descriptor.render(STUDENT_VIEW).content
    expected_context = dict(initial_context)
    # Handler urls depend on the freshly initialized runtime, so they are
    # filled in only now.
    expected_context['metadata'].update({
        'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
            self.item_descriptor, 'transcript', 'translation/__lang__'
        ).rstrip('/?'),
        'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
            self.item_descriptor, 'transcript', 'available_translations'
        ).rstrip('/?'),
        'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
        'sources': data['result']['sources'],
    })
    expected_context.update({
        'id': self.item_descriptor.location.html_id(),
        'download_video_link': data['result']['download_video_link'],
        'metadata': json.dumps(expected_context['metadata'])
    })
    # The rendered content must match the template rendered with our
    # hand-built context.
    self.assertEqual(
        context,
        self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
    )
# pylint: disable=invalid-name
@patch('xmodule.video_module.video_module.BrandingInfoConfig')
@patch('xmodule.video_module.video_module.get_video_from_cdn')
def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig):
    """
    Test if sources got from CDN.

    With the user located in "CN", the html5 sources should be rewritten
    to their CDN equivalents (per the mocked get_video_from_cdn) and the
    branding info configured for that region should appear in the
    rendered context. Runs once with and once without an edx_video_id.
    """
    mock_BrandingInfoConfig.get_config.return_value = {
        "CN": {
            'url': 'http://www.xuetangx.com',
            'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
            'logo_tag': 'Video hosted by XuetangX.com'
        }
    }

    def side_effect(*args, **kwargs):
        # args[1] is the original source url; urls not in the map yield
        # None (i.e. "no CDN equivalent").
        cdn = {
            'http://example.com/example.mp4': 'http://cdn_example.com/example.mp4',
            'http://example.com/example.webm': 'http://cdn_example.com/example.webm',
        }
        return cdn.get(args[1])

    mocked_get_video.side_effect = side_effect
    # XML template for the module under test; placeholders are filled
    # from each entry of `cases` below.
    SOURCE_XML = """
        <video show_captions="true"
        display_name="A Name"
        sub="a_sub_file.srt.sjson" source="{source}"
        download_video="{download_video}"
        edx_video_id="{edx_video_id}"
        start_time="01:00:03" end_time="01:00:10"
        >
        {sources}
        </video>
    """
    case_data = {
        'download_video': 'true',
        'source': 'example_source.mp4',
        'sources': """
            <source src="http://example.com/example.mp4"/>
            <source src="http://example.com/example.webm"/>
        """,
        'result': {
            'download_video_link': u'example_source.mp4',
            # Both sources are expected to come back CDN-rewritten.
            'sources': [
                u'http://cdn_example.com/example.mp4',
                u'http://cdn_example.com/example.webm'
            ],
        },
    }
    # test with and without edx_video_id specified.
    cases = [
        dict(case_data, edx_video_id=""),
        dict(case_data, edx_video_id="vid-v1:12345"),
    ]
    initial_context = {
        'branding_info': {
            'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
            'logo_tag': 'Video hosted by XuetangX.com',
            'url': 'http://www.xuetangx.com'
        },
        'license': None,
        'bumper_metadata': 'null',
        'cdn_eval': False,
        'cdn_exp_group': None,
        'display_name': u'A Name',
        'download_video_link': None,
        'handout': None,
        # Placeholder; the real html_id is only known after the module is
        # initialized inside the loop.
        'id': None,
        'metadata': self.default_metadata_dict,
        'track': None,
        'transcript_download_format': 'srt',
        'transcript_download_formats_list': [
            {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
            {'display_name': 'Text (.txt) file', 'value': 'txt'}
        ],
        'poster': 'null',
    }
    for data in cases:
        DATA = SOURCE_XML.format(
            download_video=data['download_video'],
            source=data['source'],
            sources=data['sources'],
            edx_video_id=data['edx_video_id'],
        )
        self.initialize_module(data=DATA)
        self.item_descriptor.xmodule_runtime.user_location = 'CN'
        # NOTE(review): uses the literal 'student_view' where sibling tests
        # use the STUDENT_VIEW constant — presumably equivalent; confirm.
        context = self.item_descriptor.render('student_view').content
        # NOTE(review): dict(initial_context) is a *shallow* copy, so the
        # metadata update below also mutates the dict shared through
        # initial_context across loop iterations — works here because the
        # urls are recomputed every pass, but worth confirming.
        expected_context = dict(initial_context)
        expected_context['metadata'].update({
            'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'translation/__lang__'
            ).rstrip('/?'),
            'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
                self.item_descriptor, 'transcript', 'available_translations'
            ).rstrip('/?'),
            'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
            'sources': data['result'].get('sources', []),
        })
        expected_context.update({
            'id': self.item_descriptor.location.html_id(),
            'download_video_link': data['result'].get('download_video_link'),
            'metadata': json.dumps(expected_context['metadata'])
        })
        self.assertEqual(
            context,
            self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
        )
@attr('shard_1')
class TestVideoDescriptorInitialization(BaseTestXmodule):
    """
    Make sure that module initialization works correctly.

    Exercises the interaction between the legacy `source` field,
    `html5_sources` and the `download_video` flag: `source` is only kept
    visible/editable when it is not already covered by html5_sources.
    """
    CATEGORY = "video"
    DATA = SOURCE_XML
    METADATA = {}

    def setUp(self):
        super(TestVideoDescriptorInitialization, self).setUp()
        self.setup_course()

    def test_source_not_in_html5sources(self):
        """A distinct `source` stays editable and implies download_video."""
        metadata = {
            'source': 'http://example.org/video.mp4',
            'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
        }
        self.initialize_module(metadata=metadata)
        fields = self.item_descriptor.editable_metadata_fields
        self.assertIn('source', fields)
        self.assertEqual(self.item_descriptor.source, 'http://example.org/video.mp4')
        self.assertTrue(self.item_descriptor.download_video)
        self.assertTrue(self.item_descriptor.source_visible)

    def test_source_in_html5sources(self):
        """A `source` duplicated in html5_sources is hidden from editing."""
        metadata = {
            'source': 'http://example.org/video.mp4',
            'html5_sources': ['http://example.org/video.mp4'],
        }
        self.initialize_module(metadata=metadata)
        fields = self.item_descriptor.editable_metadata_fields
        self.assertNotIn('source', fields)
        self.assertTrue(self.item_descriptor.download_video)
        self.assertFalse(self.item_descriptor.source_visible)

    def test_download_video_is_explicitly_set(self):
        """An explicit download_video=False is not overridden by `source`."""
        metadata = {
            'track': u'http://some_track.srt',
            'source': 'http://example.org/video.mp4',
            'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
            'download_video': False,
        }
        self.initialize_module(metadata=metadata)
        fields = self.item_descriptor.editable_metadata_fields
        self.assertIn('source', fields)
        self.assertIn('download_video', fields)
        self.assertFalse(self.item_descriptor.download_video)
        self.assertTrue(self.item_descriptor.source_visible)
        self.assertTrue(self.item_descriptor.download_track)

    def test_source_is_empty(self):
        """An empty `source` is hidden and does not enable download_video."""
        metadata = {
            'source': '',
            'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'],
        }
        self.initialize_module(metadata=metadata)
        fields = self.item_descriptor.editable_metadata_fields
        self.assertNotIn('source', fields)
        self.assertFalse(self.item_descriptor.download_video)
@ddt.ddt
class TestVideoDescriptorStudentViewJson(TestCase):
    """
    Tests for the student_view_json method on VideoDescriptor.

    Covers the three outcomes: web-only videos, VAL-backed data for a
    known edx_video_id, and the "fallback" data built from the html5
    source url when VAL has nothing usable.
    """
    TEST_DURATION = 111.0
    TEST_PROFILE = "mobile"
    TEST_SOURCE_URL = "http://www.example.com/source.mp4"
    TEST_LANGUAGE = "ge"
    TEST_ENCODED_VIDEO = {
        'profile': TEST_PROFILE,
        'bitrate': 333,
        'url': 'http://example.com/video',
        'file_size': 222,
    }
    TEST_EDX_VIDEO_ID = 'test_edx_video_id'

    def setUp(self):
        super(TestVideoDescriptorStudentViewJson, self).setUp()
        sample_xml = (
            "<video display_name='Test Video'> " +
            "<source src='" + self.TEST_SOURCE_URL + "'/> " +
            "<transcript language='" + self.TEST_LANGUAGE + "' src='german_translation.srt' /> " +
            "</video>"
        )
        self.transcript_url = "transcript_url"
        self.video = instantiate_descriptor(data=sample_xml)
        # Every transcript handler url resolves to the same fake url.
        self.video.runtime.handler_url = Mock(return_value=self.transcript_url)

    def setup_val_video(self, associate_course_in_val=False):
        """
        Creates a video entry in VAL.
        Arguments:
            associate_course - If True, associates the test course with the video in VAL.
        """
        create_profile('mobile')
        create_video({
            'edx_video_id': self.TEST_EDX_VIDEO_ID,
            'client_video_id': 'test_client_video_id',
            'duration': self.TEST_DURATION,
            'status': 'dummy',
            'encoded_videos': [self.TEST_ENCODED_VIDEO],
            'courses': [self.video.location.course_key] if associate_course_in_val else [],
        })
        self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID)  # pylint: disable=attribute-defined-outside-init

    def get_result(self, allow_cache_miss=True):
        """
        Returns the result from calling the video's student_view_json method.
        Arguments:
            allow_cache_miss is passed in the context to the student_view_json method.
        """
        # The context expects string booleans, not Python booleans.
        context = {
            "profiles": [self.TEST_PROFILE],
            "allow_cache_miss": "True" if allow_cache_miss else "False"
        }
        return self.video.student_view_json(context)

    def verify_result_with_fallback_url(self, result):
        """
        Verifies the result is as expected when returning "fallback" video data (not from VAL).
        """
        self.assertDictEqual(
            result,
            {
                "only_on_web": False,
                "duration": None,
                "transcripts": {self.TEST_LANGUAGE: self.transcript_url},
                "encoded_videos": {"fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0}},
            }
        )

    def verify_result_with_val_profile(self, result):
        """
        Verifies the result is as expected when returning video data from VAL.
        """
        # Pop the encodings first so the remaining dict can be compared
        # exactly below.
        self.assertDictContainsSubset(
            result.pop("encoded_videos")[self.TEST_PROFILE],
            self.TEST_ENCODED_VIDEO,
        )
        self.assertDictEqual(
            result,
            {
                "only_on_web": False,
                "duration": self.TEST_DURATION,
                "transcripts": {self.TEST_LANGUAGE: self.transcript_url},
            }
        )

    def test_only_on_web(self):
        """A web-only video returns nothing but the only_on_web flag."""
        self.video.only_on_web = True
        result = self.get_result()
        self.assertDictEqual(result, {"only_on_web": True})

    def test_no_edx_video_id(self):
        """Without an edx_video_id the fallback html5 data is returned."""
        result = self.get_result()
        self.verify_result_with_fallback_url(result)

    @ddt.data(
        *itertools.product([True, False], [True, False], [True, False])
    )
    @ddt.unpack
    def test_with_edx_video_id(self, allow_cache_miss, video_exists_in_val, associate_course_in_val):
        """VAL data is used only when the video exists and is either
        associated with the course or cache misses are allowed."""
        self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
        if video_exists_in_val:
            self.setup_val_video(associate_course_in_val)
        result = self.get_result(allow_cache_miss)
        if video_exists_in_val and (associate_course_in_val or allow_cache_miss):
            self.verify_result_with_val_profile(result)
        else:
            self.verify_result_with_fallback_url(result)
@attr('shard_1')
class VideoDescriptorTest(TestCase, VideoDescriptorTestBase):
    """
    Tests for video descriptor that requires access to django settings.
    """

    def setUp(self):
        super(VideoDescriptorTest, self).setUp()
        self.descriptor.runtime.handler_url = MagicMock()

    def test_get_context(self):
        """
        Test get_context.
        This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields.
        Which, in turn, uses settings.LANGUAGES from django settings.
        """
        correct_tabs = [
            {
                'name': "Basic",
                'template': "video/transcripts.html",
                'current': True
            },
            {
                'name': 'Advanced',
                'template': 'tabs/metadata-edit-tab.html'
            }
        ]
        rendered_context = self.descriptor.get_context()
        self.assertListEqual(rendered_context['tabs'], correct_tabs)

    def test_export_val_data(self):
        """VAL data for a known edx_video_id is serialized into the XML."""
        self.descriptor.edx_video_id = 'test_edx_video_id'
        create_profile('mobile')
        create_video({
            'edx_video_id': self.descriptor.edx_video_id,
            'client_video_id': 'test_client_video_id',
            'duration': 111,
            'status': 'dummy',
            'encoded_videos': [{
                'profile': 'mobile',
                'url': 'http://example.com/video',
                'file_size': 222,
                'bitrate': 333,
            }],
        })
        actual = self.descriptor.definition_to_xml(resource_fs=None)
        expected_str = """
            <video download_video="false" url_name="SampleProblem">
                <video_asset client_video_id="test_client_video_id" duration="111.0">
                    <encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
                </video_asset>
            </video>
        """
        # remove_blank_text lets us compare ignoring whitespace layout.
        parser = etree.XMLParser(remove_blank_text=True)
        expected = etree.XML(expected_str, parser=parser)
        self.assertXmlEqual(expected, actual)

    def test_export_val_data_not_found(self):
        """An unknown edx_video_id exports without a video_asset element."""
        self.descriptor.edx_video_id = 'nonexistent'
        actual = self.descriptor.definition_to_xml(resource_fs=None)
        expected_str = """<video download_video="false" url_name="SampleProblem"/>"""
        parser = etree.XMLParser(remove_blank_text=True)
        expected = etree.XML(expected_str, parser=parser)
        self.assertXmlEqual(expected, actual)

    def test_import_val_data(self):
        """Importing XML with a video_asset creates the video in VAL."""
        create_profile('mobile')
        module_system = DummySystem(load_error_modules=True)
        xml_data = """
            <video edx_video_id="test_edx_video_id">
                <video_asset client_video_id="test_client_video_id" duration="111.0">
                    <encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
                </video_asset>
            </video>
        """
        id_generator = Mock()
        id_generator.target_course_id = "test_course_id"
        video = VideoDescriptor.from_xml(xml_data, module_system, id_generator)
        self.assertEqual(video.edx_video_id, 'test_edx_video_id')
        video_data = get_video_info(video.edx_video_id)
        self.assertEqual(video_data['client_video_id'], 'test_client_video_id')
        self.assertEqual(video_data['duration'], 111)
        self.assertEqual(video_data['status'], 'imported')
        self.assertEqual(video_data['courses'], [id_generator.target_course_id])
        self.assertEqual(video_data['encoded_videos'][0]['profile'], 'mobile')
        self.assertEqual(video_data['encoded_videos'][0]['url'], 'http://example.com/video')
        self.assertEqual(video_data['encoded_videos'][0]['file_size'], 222)
        self.assertEqual(video_data['encoded_videos'][0]['bitrate'], 333)

    def test_import_val_data_invalid(self):
        """Invalid VAL data in the XML raises and nothing is created."""
        create_profile('mobile')
        module_system = DummySystem(load_error_modules=True)
        # Negative file_size is invalid
        xml_data = """
            <video edx_video_id="test_edx_video_id">
                <video_asset client_video_id="test_client_video_id" duration="111.0">
                    <encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/>
                </video_asset>
            </video>
        """
        with self.assertRaises(ValCannotCreateError):
            VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock())
        # The failed import must not leave a partial video behind.
        with self.assertRaises(ValVideoNotFoundError):
            get_video_info("test_edx_video_id")
class TestVideoWithBumper(TestVideo):
    """
    Tests rendered content in presence of video bumper.
    """
    CATEGORY = "video"
    METADATA = {}
    # BUG FIX: this previously aliased the live ``settings.FEATURES`` dict
    # (``FEATURES = settings.FEATURES``), so the ``self.FEATURES.update(...)``
    # calls below mutated global Django settings for every other test, even
    # outside the ``override_settings`` blocks. Working on a copy keeps the
    # mutations local to this test class.
    FEATURES = settings.FEATURES.copy()

    @patch('xmodule.video_module.bumper_utils.get_bumper_settings')
    def test_is_bumper_enabled(self, get_bumper_settings):
        """
        Check that bumper is (not)shown if ENABLE_VIDEO_BUMPER is (False)True
        Assume that bumper settings are correct.
        """
        self.FEATURES.update({
            "SHOW_BUMPER_PERIODICITY": 1,
            "ENABLE_VIDEO_BUMPER": True,
        })
        get_bumper_settings.return_value = {
            "video_id": "edx_video_id",
            "transcripts": {},
        }
        with override_settings(FEATURES=self.FEATURES):
            self.assertTrue(bumper_utils.is_bumper_enabled(self.item_descriptor))

        # Flipping the flag off must disable the bumper again.
        self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False})
        with override_settings(FEATURES=self.FEATURES):
            self.assertFalse(bumper_utils.is_bumper_enabled(self.item_descriptor))

    @patch('xmodule.video_module.bumper_utils.is_bumper_enabled')
    @patch('xmodule.video_module.bumper_utils.get_bumper_settings')
    @patch('edxval.api.get_urls_for_profiles')
    def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled):
        """
        Test content with rendered bumper metadata.

        With the bumper forced on, the student view context must carry a
        ``bumper_metadata`` JSON blob built from the VAL mp4 url (the empty
        webm url is dropped) alongside the regular video metadata.
        """
        get_url_for_profiles.return_value = {
            "desktop_mp4": "http://test_bumper.mp4",
            "desktop_webm": "",
        }
        get_bumper_settings.return_value = {
            "video_id": "edx_video_id",
            "transcripts": {},
        }
        is_bumper_enabled.return_value = True

        content = self.item_descriptor.render(STUDENT_VIEW).content
        sources = [u'example.mp4', u'example.webm']
        # NOTE(review): OrderedDict(...) built from a dict literal takes the
        # literal's iteration order, which is arbitrary before CPython 3.6 —
        # assumed to match the module's own serialization; confirm.
        expected_context = {
            'branding_info': None,
            'license': None,
            'bumper_metadata': json.dumps(OrderedDict({
                'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',
                "showCaptions": "true",
                "sources": ["http://test_bumper.mp4"],
                'streams': '',
                "transcriptLanguage": "en",
                "transcriptLanguages": {"en": "English"},
                "transcriptTranslationUrl": video_utils.set_query_parameter(
                    self.item_descriptor.xmodule_runtime.handler_url(
                        self.item_descriptor, 'transcript', 'translation/__lang__'
                    ).rstrip('/?'), 'is_bumper', 1
                ),
                "transcriptAvailableTranslationsUrl": video_utils.set_query_parameter(
                    self.item_descriptor.xmodule_runtime.handler_url(
                        self.item_descriptor, 'transcript', 'available_translations'
                    ).rstrip('/?'), 'is_bumper', 1
                ),
            })),
            'cdn_eval': False,
            'cdn_exp_group': None,
            'display_name': u'A Name',
            'download_video_link': u'example.mp4',
            'handout': None,
            'id': self.item_descriptor.location.html_id(),
            'metadata': json.dumps(OrderedDict({
                "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state",
                "autoplay": False,
                "streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg",
                "sub": "a_sub_file.srt.sjson",
                "sources": sources,
                "captionDataDir": None,
                "showCaptions": "true",
                "generalSpeed": 1.0,
                "speed": None,
                "savedVideoPosition": 0.0,
                "start": 3603.0,
                "end": 3610.0,
                "transcriptLanguage": "en",
                "transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}),
                "ytTestTimeout": 1500,
                "ytApiUrl": "https://www.youtube.com/iframe_api",
                "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/",
                "ytKey": None,
                "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'translation/__lang__'
                ).rstrip('/?'),
                "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url(
                    self.item_descriptor, 'transcript', 'available_translations'
                ).rstrip('/?'),
                "autohideHtml5": False,
            })),
            'track': None,
            'transcript_download_format': 'srt',
            'transcript_download_formats_list': [
                {'display_name': 'SubRip (.srt) file', 'value': 'srt'},
                {'display_name': 'Text (.txt) file', 'value': 'txt'}
            ],
            # Poster comes from the youtube stream's thumbnail.
            'poster': json.dumps(OrderedDict({
                "url": "http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg",
                "type": "youtube"
            }))
        }

        expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
        self.assertEqual(content, expected_content)
| agpl-3.0 |
mganeva/mantid | MantidPlot/pymantidplot/proxies.py | 1 | 37572 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
Module containing classes that act as proxies to the various MantidPlot gui objects that are
accessible from python. They listen for the QObject 'destroyed' signal and set the wrapped
reference to None, thus ensuring that further attempts at access do not cause a crash.
"""
from __future__ import (absolute_import, division,
print_function)
from six.moves import range
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt, pyqtSlot
try:
import builtins
except ImportError:
import __builtin__ as builtins
import mantid
import mantidqtpython
#-----------------------------------------------------------------------------
#--------------------------- MultiThreaded Access ----------------------------
#-----------------------------------------------------------------------------
class CrossThreadCall(QtCore.QObject):
    """
    Defines a dispatch call that marshals
    function calls between threads.

    The wrapped callable is always executed on the GUI (main) thread:
    a call made from any other thread blocks until the GUI thread has
    run it, and the return value (or raised exception) is handed back
    to the calling thread.
    """
    # Per-dispatch state; reset at the start of every dispatch() call.
    __callable = None
    __args = []
    __kwargs = {}
    __func_return = None
    __exception = None

    def __init__(self, callable):
        """ Construct the object
        """
        QtCore.QObject.__init__(self)
        # Move this QObject to the GUI thread so that the queued
        # invokeMethod in _do_dispatch executes there.
        self.moveToThread(QtGui.qApp.thread())
        self.__callable = callable
        # Surface the wrapped callable's docstring through __call__.
        self.__call__.__func__.__doc__ = callable.__doc__

    def dispatch(self, *args, **kwargs):
        """Dispatches a call to callable with
        the given arguments using QMetaObject.invokeMethod
        to ensure the call happens in the object's thread
        """
        self.__args = args
        self.__kwargs = kwargs
        self.__func_return = None
        self.__exception = None
        return self._do_dispatch()

    def __call__(self, *args, **kwargs):
        """
        Calls the dispatch method
        """
        return self.dispatch(*args, **kwargs)

    @pyqtSlot()
    def _do_dispatch(self):
        """Perform a call to a GUI function across a
        thread and return the result
        """
        if QtCore.QThread.currentThread() != QtGui.qApp.thread():
            # Re-enter this slot on the GUI thread; BlockingQueuedConnection
            # suspends the calling thread until the slot returns.
            QtCore.QMetaObject.invokeMethod(self, "_do_dispatch", Qt.BlockingQueuedConnection)
        else:
            try:
                self.__func_return = self.__callable(*self.__args, **self.__kwargs)
            except Exception as exc:
                self.__exception = exc
        if self.__exception is not None:
            raise self.__exception  # Ensures this happens in the right thread
        return self.__func_return

    def _get_argtype(self, argument):
        """
        Returns the argument type that will be passed to
        the QMetaObject.invokeMethod call.
        """
        # Bug fix: the previous body had an early ``return argtype``
        # followed by an unreachable enum-to-int coercion block; the dead
        # code has been removed. Note the coercion it described was never
        # actually applied (and this helper is not called from within this
        # class), so behaviour is unchanged.
        return type(argument)
def threadsafe_call(callable, *args, **kwargs):
    """
    Run ``callable(*args, **kwargs)`` on the GUI thread.

    The call is routed through a CrossThreadCall instance, which
    guarantees that GUI functions execute on the thread that owns the
    GUI objects; the callable's return value is handed back to the
    caller.
    """
    return CrossThreadCall(callable).dispatch(*args, **kwargs)
def new_proxy(classType, callable, *args, **kwargs):
    """
    Invoke ``callable`` thread-safely and wrap any non-None result.

    The callable is executed via threadsafe_call to deal with possible
    thread-safety issues; a None result is passed through unchanged,
    anything else is wrapped in a new proxy of type classType.

    @param classType :: A new proxy class for the return type
    @param callable :: A python callable object, i.e. a function/method
    @param \*args :: The positional arguments passed on as given
    @param \*kwargs :: The keyword arguments passed on as given
    """
    held = threadsafe_call(callable, *args, **kwargs)
    return None if held is None else classType(held)
#-----------------------------------------------------------------------------
#--------------------------- Proxy Objects -----------------------------------
#-----------------------------------------------------------------------------
class QtProxyObject(QtCore.QObject):
    """Generic Proxy object for wrapping Qt C++ QObjects.
    This holds the QObject internally and passes methods to it.
    When the underlying object is deleted, the reference is set
    to None to avoid segfaults.

    Note: ``__obj`` is name-mangled (``_QtProxyObject__obj``), so
    subclasses must go through _getHeldObject()/_swap() rather than
    touching the attribute directly.
    """
    def __init__(self, toproxy):
        QtCore.QObject.__init__(self)
        self.__obj = toproxy
        # Connect to track the destroyed
        # (DirectConnection so the reference is dropped immediately,
        # regardless of which thread emits the signal).
        if self.__obj is not None:
            self.connect(self.__obj, QtCore.SIGNAL("destroyed()"),
                         self._kill_object, Qt.DirectConnection)

    def __del__(self):
        """
        Disconnect the signal
        """
        self._disconnect_from_destroyed()

    def close(self):
        """
        Reroute a method call to the the stored object
        """
        self._disconnect_from_destroyed()
        # Dependent windows (if any) are closed before the object itself.
        if hasattr(self.__obj, 'closeDependants'):
            threadsafe_call(self.__obj.closeDependants)
        if hasattr(self.__obj, 'close'):
            threadsafe_call(self.__obj.close)
        self._kill_object()

    def inherits(self, className):
        """
        Reroute a method call to the stored object
        """
        return threadsafe_call(self.__obj.inherits, className)

    def _disconnect_from_destroyed(self):
        """
        Disconnects from the wrapped object's destroyed signal
        """
        if self.__obj is not None:
            self.disconnect(self.__obj, QtCore.SIGNAL("destroyed()"),
                            self._kill_object)

    def __getattr__(self, attr):
        """
        Reroute a method call to the the stored object via
        the threadsafe call mechanism. Essentially this guarantees
        that when the method is called it will be on the GUI thread
        """
        # Returns a CrossThreadCall wrapper, so the attribute behaves
        # like a callable that dispatches on the GUI thread.
        callable = getattr(self._getHeldObject(), attr)
        return CrossThreadCall(callable)

    def __dir__(self):
        # Advertise the held object's attributes for tab completion etc.
        return dir(self._getHeldObject())

    def __str__(self):
        """
        Return a string representation of the proxied object
        """
        return str(self._getHeldObject())

    def __repr__(self):
        """
        Return a string representation of the proxied object
        """
        return repr(self._getHeldObject())

    def _getHeldObject(self):
        """
        Returns a reference to the held object
        """
        return self.__obj

    def _kill_object(self):
        """
        Release the stored instance
        """
        self.__obj = None

    def _swap(self, obj):
        """
        Swap an object so that the proxy now refers to this object
        """
        self.__obj = obj
#-----------------------------------------------------------------------------
class MDIWindow(QtProxyObject):
    """Proxy for the _qti.MDIWindow object.
    Also used for subclasses that do not need any methods intercepted (e.g. Table, Note, Matrix)
    """
    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def folder(self):
        """Return a Folder proxy for the folder holding this window."""
        held = self._getHeldObject()
        return new_proxy(Folder, held.folder)
#-----------------------------------------------------------------------------
class Graph(MDIWindow):
    """Proxy for the _qti.Graph object.

    Methods delegate to the held C++ object via threadsafe_call, and
    returned layers are wrapped in Layer proxies.
    """
    # When checking the SIP interface, remember the following name mappings (PyName):
    # C++ 'Multilayer' class => Python 'Graph' class
    # C++ 'Graph' class => Python 'Layer' class

    def __init__(self, toproxy):
        MDIWindow.__init__(self, toproxy)

    def activeLayer(self):
        """Get a handle to the presently active layer """
        return new_proxy(Layer, self._getHeldObject().activeLayer)

    def setActiveLayer(self, layer):
        """Set the active layer to that specified.

        Args:
            layer: A reference to the Layer to make the active one. Must belong to this Graph.
        """
        # Unwrap the proxy: the C++ side expects the raw held object.
        threadsafe_call(self._getHeldObject().setActiveLayer, layer._getHeldObject())

    def layer(self, num):
        """ Get a handle to a specified layer

        Args:
            num: The index of the required layer
        """
        return new_proxy(Layer, self._getHeldObject().layer, num)

    def addLayer(self, x=0, y=0, width=None, height=None):
        """Add a layer to the graph.

        Args:
            x: The coordinate in pixels (from the graph's left) of the top-left of the new layer (default: 0).
            y: The coordinate in pixels (from the graph's top) of the top-left of the new layer (default: 0).
            width: The width of the new layer (default value if not specified).
            height: The height of the new layer (default value if not specified).

        Returns:
            A handle to the newly created layer.
        """
        # Turn the optional arguments into the magic numbers that the C++ expects
        if width is None:
            width = 0
        if height is None:
            height = 0
        return new_proxy(Layer, self._getHeldObject().addLayer, x, y, width, height)

    def insertCurve(self, graph, index):
        """Add a curve from another graph to this one.

        Args:
            graph: A reference to the graph from which the curve is coming (does nothing if this argument is the present Graph).
            index: The index of the curve to add (counts from zero).
        """
        threadsafe_call(self._getHeldObject().insertCurve, graph._getHeldObject(), index)
#-----------------------------------------------------------------------------
class Layer(QtProxyObject):
    """Proxy for the _qti.Layer object.

    (In the SIP declarations the C++ 'Graph' class is exposed to Python
    under the name 'Layer'.)
    """
    # These methods are used for the new matplotlib-like CLI
    # These ones are provided by the C++ class Graph, which in the SIP declarations is renamed as Layer
    # The only purpose of listing them here is that these will be returned by this class' __dir()__, and
    # shown interactively, while the ones not listed and/or overloaded here may not be shown in ipython, etc.
    # NOTE(review): 'logXLinY' appears twice in this list — presumably one
    # entry was meant to be 'logYLinX'; confirm against the SIP interface.
    additional_methods = ['logLogAxes', 'logXLinY', 'logXLinY',
                          'removeLegend', 'export', 'setAxisScale', 'setCurveLineColor', 'setCurveLineStyle',
                          'setCurveLineWidth', 'setCurveSymbol', 'setScale', 'setTitle', 'setXTitle', 'setYTitle']

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def insertCurve(self, *args):
        """Add a curve from a workspace, table or another Layer to the plot

        Args:
            The first argument should be a reference to a workspace, table or layer, or a workspace name.
            Subsequent arguments vary according to the type of the first.

        Returns:
            A boolean indicating success or failure.
        """
        # Dispatch on the first argument: workspace name (str), workspace
        # (duck-typed via getName), or another proxy object.
        if isinstance(args[0], str):
            return threadsafe_call(self._getHeldObject().insertCurve, *args)
        elif hasattr(args[0], 'getName'):
            return threadsafe_call(self._getHeldObject().insertCurve, args[0].name(), *args[1:])
        else:
            return threadsafe_call(self._getHeldObject().insertCurve, args[0]._getHeldObject(), *args[1:])

    def addCurves(self, table, columns, style=0, lineWidth=1, symbolSize=3, startRow=0, endRow=-1):
        """Add curves based on table columns to the plot.

        Args:
            table: A reference to the table containing the data to plot.
            columns: A tuple of column indices.
            style: The curve style (default: Line).
            lineWidth: The width of the curve line (default: 1).
            symbolSize: The curve's symbol size (default: 3).
            startRow: The first row to include in the curve's data (default: the first one)
            endRow: The last row to include in the curve's data (default: the last one)

        Returns:
            A boolean indicating success or failure.
        """
        return threadsafe_call(self._getHeldObject().addCurves, table._getHeldObject(), columns, style, lineWidth, symbolSize, startRow, endRow)

    def addCurve(self, table, columnName, style=0, lineWidth=1, symbolSize=3, startRow=0, endRow=-1):
        """Add a curve based on a table column to the plot.

        Args:
            table: A reference to the table containing the data to plot.
            columns: The name of the column to plot.
            style: The curve style (default: Line).
            lineWidth: The width of the curve line (default: 1).
            symbolSize: The curve's symbol size (default: 3).
            startRow: The first row to include in the curve's data (default: the first one)
            endRow: The last row to include in the curve's data (default: the last one)

        Returns:
            A boolean indicating success or failure.
        """
        return threadsafe_call(self._getHeldObject().addCurve, table._getHeldObject(), columnName, style, lineWidth, symbolSize, startRow, endRow)

    def addErrorBars(self, yColName, errTable, errColName, type=1, width=1, cap=8, color=Qt.black, through=False, minus=True, plus=True):
        """Add error bars to a plot that was created from a table column.

        Args:
            yColName: The name of the column pertaining to the curve's data values.
            errTable: A reference to the table holding the error values.
            errColName: The name of the column containing the error values.
            type: The orientation of the error bars - horizontal (0) or vertical (1, the default).
            width: The line width of the error bars (default: 1).
            cap: The length of the cap on the error bars (default: 8).
            color: The color of error bars (default: black).
            through: Whether the error bars are drawn through the symbol (default: no).
            minus: Whether these errors should be shown as negative errors (default: yes).
            plus: Whether these errors should be shown as positive errors (default: yes).
        """
        threadsafe_call(self._getHeldObject().addErrorBars, yColName, errTable._getHeldObject(), errColName, type, width, cap, color, through, minus, plus)

    def errorBarSettings(self, curveIndex, errorBarIndex=0):
        """Get a handle to the error bar settings for a specified curve.

        Args:
            curveIndex: The curve to get the settings for
            errorBarIndex: A curve can hold more than one set of error bars. Specify which one (default: the first).
                           Note that a curve plotted from a workspace can have only one set of error bars (and hence settings).

        Returns: A handle to the error bar settings object.
        """
        return new_proxy(QtProxyObject, self._getHeldObject().errorBarSettings, curveIndex, errorBarIndex)

    def addHistogram(self, matrix):
        """Add a matrix histogram to the graph"""
        threadsafe_call(self._getHeldObject().addHistogram, matrix._getHeldObject())

    def newLegend(self, text):
        """Create a new legend.

        Args:
            text: The text of the legend.

        Returns:
            A handle to the newly created legend widget.
        """
        return new_proxy(QtProxyObject, self._getHeldObject().newLegend, text)

    def legend(self):
        """Get a handle to the layer's legend widget."""
        return new_proxy(QtProxyObject, self._getHeldObject().legend)

    def grid(self):
        """Get a handle to the grid object for this layer."""
        return new_proxy(QtProxyObject, self._getHeldObject().grid)

    def spectrogram(self):
        """If the layer contains a spectrogram, get a handle to the spectrogram object."""
        return new_proxy(QtProxyObject, self._getHeldObject().spectrogram)

    def __dir__(self):
        """Returns the list of attributes of this object."""
        # The first part (explicitly defined ones) are here for the traditional Mantid CLI,
        # the additional ones have been added for the matplotlib-like CLI (without explicit
        # declaration/documentation here in the proxies layer.
        return ['insertCurve', 'addCurves', 'addCurve', 'addErrorBars', 'errorBarSettings', 'addHistogram',
                'newLegend', 'legend', 'grid', 'spectrogram'] + self.additional_methods
#-----------------------------------------------------------------------------
class Graph3D(QtProxyObject):
    """Proxy for the _qti.Graph3D object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def setData(self, table, colName, type=0):
        """Use a table column as the data source for this plot.

        Args:
            table: A reference to the table.
            colName: The name of the column to set as the data source.
            type: The plot type.
        """
        plot = self._getHeldObject()
        threadsafe_call(plot.setData, table._getHeldObject(), colName, type)

    def setMatrix(self, matrix):
        """Use a matrix (N.B. not a MantidMatrix) as the data source for this plot.

        Args:
            matrix: A reference to the matrix.
        """
        plot = self._getHeldObject()
        threadsafe_call(plot.setMatrix, matrix._getHeldObject())
#-----------------------------------------------------------------------------
class Spectrogram(QtProxyObject):
    """Proxy for the _qti.Spectrogram object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def matrix(self):
        """Return a proxy handle to the spectrogram's data source."""
        held = self._getHeldObject()
        return new_proxy(QtProxyObject, held.matrix)
#-----------------------------------------------------------------------------
class Folder(QtProxyObject):
    """Proxy for the _qti.Folder object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def windows(self):
        """Get a list of proxies for the windows in this folder."""
        return [MDIWindow(w) for w in self._getHeldObject().windows()]

    def folders(self):
        """Get a list of proxies for the subfolders of this folder."""
        return [Folder(sub) for sub in self._getHeldObject().folders()]

    def folder(self, name, caseSensitive=True, partialMatch=False):
        """Get a handle to a named subfolder.

        Args:
            name: The name of the subfolder.
            caseSensitive: Search case-sensitively or not (default: yes).
            partialMatch: Return a partial match (default: no).
        Returns:
            A handle to the requested folder, or None if no match found.
        """
        held = self._getHeldObject()
        return new_proxy(Folder, held.folder, name, caseSensitive, partialMatch)

    def findWindow(self, name, searchOnName=True, searchOnLabel=True, caseSensitive=False, partialMatch=True):
        """Get a handle to the first window matching the search criteria.

        Args:
            name: The name of the window.
            searchOnName: Search the window names (takes precedence over searchOnLabel).
            searchOnLabel: Search the window labels.
            caseSensitive: Search case-sensitively or not (default: no).
            partialMatch: Return a partial match (default: yes).
        Returns:
            A handle to the requested window, or None if no match found.
        """
        held = self._getHeldObject()
        return new_proxy(MDIWindow, held.findWindow, name, searchOnName,
                         searchOnLabel, caseSensitive, partialMatch)

    def window(self, name, cls='MdiSubWindow', recursive=False):
        """Get a handle to a named window of a particular type.

        Args:
            name: The name of the window.
            cls: Only consider windows of a type inheriting from this class
                (N.B. this is the C++ class name).
            recursive: If True, do a depth-first recursive search (default: False).
        Returns:
            A handle to the window, or None if no match found.
        """
        held = self._getHeldObject()
        return new_proxy(MDIWindow, held.window, name, cls, recursive)

    def table(self, name, recursive=False):
        """Get a handle to the table with the given name.

        Args:
            name: The name of the table to search for.
            recursive: If True, do a depth-first recursive search (default: False).
        """
        held = self._getHeldObject()
        return new_proxy(MDIWindow, held.table, name, recursive)

    def matrix(self, name, recursive=False):
        """Get a handle to the matrix with the given name.

        Args:
            name: The name of the matrix to search for.
            recursive: If True, do a depth-first recursive search (default: False).
        """
        held = self._getHeldObject()
        return new_proxy(MDIWindow, held.matrix, name, recursive)

    def graph(self, name, recursive=False):
        """Get a handle to the graph with the given name.

        Args:
            name: The name of the graph to search for.
            recursive: If True, do a depth-first recursive search (default: False).
        """
        held = self._getHeldObject()
        return new_proxy(Graph, held.graph, name, recursive)

    def rootFolder(self):
        """Get the folder at the root of the hierarchy."""
        return new_proxy(Folder, self._getHeldObject().rootFolder)
#-----------------------------------------------------------------------------
class MantidMatrix(MDIWindow):
    """Proxy for the _qti.MantidMatrix object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def plotGraph3D(self, style=3):
        """Create a 3D plot of the workspace data.

        Args:
            style: The qwt3d plotstyle of the generated graph (default: filled mesh).
        Returns:
            A handle to the newly created graph (a Graph3D object).
        """
        held = self._getHeldObject()
        return new_proxy(Graph3D, held.plotGraph3D, style)

    def plotGraph2D(self, type=16):
        """Create a spectrogram from the workspace data.

        Args:
            type: The style of the plot (default: ColorMap).
        Returns:
            A handle to the newly created graph (a Graph object).
        """
        held = self._getHeldObject()
        return new_proxy(Graph, held.plotGraph2D, type)
#-----------------------------------------------------------------------------
class InstrumentView(MDIWindow):
    """Proxy for the instrument window
    """
    def __init__(self, toproxy):
        """Creates a proxy object around an instrument window

        Args:
            toproxy: The raw C object to proxy
        """
        QtProxyObject.__init__(self, toproxy)
    def getTab(self, name_or_tab):
        """Retrieve a handle to the given tab

        Args:
            name_or_index: A string containing the title or tab type

        Returns:
            A handle to a tab widget

        Raises:
            ValueError: if no tab with the given title exists
        """
        handle = new_proxy(QtProxyObject, self._getHeldObject().getTab, name_or_tab)
        if handle is None:
            raise ValueError("Invalid tab title '%s'" % str(name_or_tab))
        return handle
    # ----- Deprecated functions -----
    # Each shim below warns and then delegates to the held object's method.
    # QtProxyObject.__getattr__ is called explicitly to fetch the underlying
    # attribute, bypassing the deprecated override defined on this class.
    def changeColormap(self, filename=None):
        import warnings
        warnings.warn("InstrumentWidget.changeColormap has been deprecated. Use the render tab method instead.")
        callable = QtProxyObject.__getattr__(self, "changeColormap")
        # Only pass the filename through when one was supplied.
        if filename is None:
            callable()
        else:
            callable(filename)
    def setColorMapMinValue(self, value):
        import warnings
        warnings.warn("InstrumentWidget.setColorMapMinValue has been deprecated. Use the render tab setMinValue method instead.")
        QtProxyObject.__getattr__(self, "setColorMapMinValue")(value)
    def setColorMapMaxValue(self, value):
        import warnings
        warnings.warn("InstrumentWidget.setColorMapMaxValue has been deprecated. Use the render tab setMaxValue method instead.")
        QtProxyObject.__getattr__(self, "setColorMapMaxValue")(value)
    def setColorMapRange(self, minvalue, maxvalue):
        import warnings
        warnings.warn("InstrumentWidget.setColorMapRange has been deprecated. Use the render tab setRange method instead.")
        QtProxyObject.__getattr__(self, "setColorMapRange")(minvalue,maxvalue)
    def setScaleType(self, scale_type):
        import warnings
        warnings.warn("InstrumentWidget.setScaleType has been deprecated. Use the render tab setScaleType method instead.")
        QtProxyObject.__getattr__(self, "setScaleType")(scale_type)
    def setViewType(self, view_type):
        import warnings
        warnings.warn("InstrumentWidget.setViewType has been deprecated. Use the render tab setSurfaceType method instead.")
        QtProxyObject.__getattr__(self, "setViewType")(view_type)
    def selectComponent(self, name):
        import warnings
        warnings.warn("InstrumentWidget.selectComponent has been deprecated. Use the tree tab selectComponentByName method instead.")
        QtProxyObject.__getattr__(self, "selectComponent")(name)
#-----------------------------------------------------------------------------
class SliceViewerWindowProxy(QtProxyObject):
    """Proxy for a C++ SliceViewerWindow object.

    It will pass-through method calls that can be applied to the
    SliceViewer widget contained within.
    """
    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)
    def __getattr__(self, attr):
        """
        Reroute a method call to the the stored object
        """
        if self._getHeldObject() is None:
            raise Exception("Error! The SliceViewerWindow has been deleted.")
        # Pass-through to the contained SliceViewer widget.
        sv = self.getSlicer()
        # But only those attributes that are methods on the SliceViewer
        if attr in SliceViewerProxy.slicer_methods:
            return getattr(sv, attr)
        else:
            # Otherwise, pass through to the stored object
            return getattr(self._getHeldObject(), attr)
    def __str__(self):
        """
        Return a string representation of the proxied object
        """
        if self._getHeldObject() is None:
            return "None"
        else:
            return 'SliceViewerWindow(workspace="%s")' % self._getHeldObject().getSlicer().getWorkspaceName()
    def __repr__(self):
        """
        Return a string representation of the proxied object
        """
        return repr(self._getHeldObject())
    def __dir__(self):
        """
        Returns the list of attributes for this object.
        Might allow tab-completion to work under ipython
        """
        # showLine is defined on this proxy itself, on top of the
        # pass-through SliceViewer methods.
        return SliceViewerProxy.slicer_methods + ['showLine']
    def getLiner(self):
        """
        Returns the LineViewer widget that is part of this
        SliceViewerWindow
        """
        return LineViewerProxy(self._getHeldObject().getLiner())
    def getSlicer(self):
        """
        Returns the SliceViewer widget that is part of this
        SliceViewerWindow
        """
        return SliceViewerProxy(self._getHeldObject().getSlicer())
    def showLine(self, start, end, width=None, planar_width=0.1, thicknesses=None,
                 num_bins=100):
        """Opens the LineViewer and define a 1D line along which to integrate.

        The line is created in the same XY dimensions and at the same slice
        point as is currently shown in the SliceViewer.

        Args:
            start :: (X,Y) coordinates of the start point in the XY dimensions
                of the current slice.
            end :: (X,Y) coordinates of the end point in the XY dimensions
                of the current slice.
            width :: if specified, sets all the widths (planar and other
                dimensions) to this integration width.
            planar_width :: sets the XY-planar (perpendicular to the line)
                integration width. Default 0.1.
            thicknesses :: list with one thickness value for each dimension in the
                workspace (including the XY dimensions, which are ignored).
                e.g. [0,1,2,3] in a XYZT workspace.
            num_bins :: number of bins by which to divide the line.
                Default 100.

        Returns:
            The LineViewer object of the SliceViewerWindow. There are methods
            available to modify the line drawn.
        """
        # First show the lineviewer
        self.getSlicer().toggleLineMode(True)
        liner = self.getLiner()
        # Start and end point
        liner.setStartXY(start[0], start[1])
        liner.setEndXY(end[0], end[1])
        # Set the width. An explicit `width` overrides `planar_width`;
        # the planar width is half the overall thickness.
        if not width is None:
            liner.setThickness(width)
            liner.setPlanarWidth(width*0.5)
        else:
            liner.setPlanarWidth(planar_width*0.5)
        # Per-dimension thicknesses (the setThickness(dim, value) overload).
        if not thicknesses is None:
            for d in range(len(thicknesses)):
                liner.setThickness(d, thicknesses[d])
        # Bins
        liner.setNumBins(num_bins)
        liner.apply()
        # Return the proxy to the LineViewer widget
        return liner
#-----------------------------------------------------------------------------
def getWorkspaceNames(source):
    """Takes a "source", which could be a WorkspaceGroup, or a list
    of workspaces, or a list of names, and converts
    it to a list of workspace names.

    Args:
        source :: input list/tuple, workspace (or proxy), group or name
    Returns:
        list of workspace names
    Raises:
        ValueError: if a workspace name cannot be found in the ADS
        TypeError: if source is of an unsupported type
    """
    ws_names = []
    if isinstance(source, (list, tuple)):
        # Flatten recursively, preserving order.
        for item in source:
            ws_names += getWorkspaceNames(item)
    elif hasattr(source, 'name'):
        # Unwrap a proxy object to get at the real workspace, if needed.
        if hasattr(source, '_getHeldObject'):
            wspace = source._getHeldObject()
        else:
            wspace = source
        # Use identity comparison with None (PEP 8), not '=='.
        if wspace is None:
            return []
        if hasattr(wspace, 'getNames'):
            # A workspace group: collect the members, excluding the group
            # entry itself.
            for n in wspace.getNames():
                if n != wspace.name():
                    ws_names.append(n)
        else:
            ws_names.append(wspace.name())
    elif isinstance(source, str):
        try:
            # for non-existent names this raises a KeyError
            wksp = mantid.AnalysisDataService.Instance()[source]
        except Exception:
            raise ValueError("Workspace '%s' not found!" % source)
        if wksp is not None:
            ws_names += getWorkspaceNames(wksp)
    else:
        raise TypeError('Incorrect type passed as workspace argument "' + str(source) + '"')
    return ws_names
#-----------------------------------------------------------------------------
class ProxyCompositePeaksPresenter(QtProxyObject):
    """Proxy for the composite peaks presenter object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def getPeaksPresenter(self, source):
        """Fetch the presenter for a single peaks workspace.

        Args:
            source: A workspace name or a Workspace object.
        Returns:
            A handle to the peaks presenter.
        Raises:
            ValueError: if source has the wrong type or the workspace is
                not present in the analysis data service.
        """
        if isinstance(source, str):
            to_present = source
        elif isinstance(source, mantid.api.Workspace):
            to_present = source.name()
        else:
            raise ValueError("getPeaksPresenter expects a Workspace name or a Workspace object.")
        if not mantid.api.mtd.doesExist(to_present):
            raise ValueError("%s does not exist in the workspace list" % to_present)
        held = self._getHeldObject()
        return new_proxy(QtProxyObject, held.getPeaksPresenter, to_present)
#-----------------------------------------------------------------------------
class SliceViewerProxy(QtProxyObject):
    """Proxy for a C++ SliceViewer widget.
    """
    # These are the exposed python method names (each listed once; the
    # original list carried duplicates of setXYDim/setSlicePoint/getSlicePoint
    # for the C++ overloads, which only duplicated dir() output).
    slicer_methods = ["setWorkspace", "getWorkspaceName", "showControls", "openFromXML", "getImage",
                      "saveImage", "copyImageToClipboard", "setFastRender", "getFastRender",
                      "toggleLineMode", "setXYDim", "getDimX", "getDimY", "setSlicePoint",
                      "getSlicePoint", "setXYLimits", "getXLimits", "getYLimits", "zoomBy",
                      "setXYCenter", "resetZoom", "loadColorMap", "setColorScale", "setColorScaleMin",
                      "setColorScaleMax", "setColorScaleLog", "getColorScaleMin", "getColorScaleMax",
                      "getColorScaleLog", "setColorScaleAutoFull", "setColorScaleAutoSlice",
                      "setColorMapBackground", "setTransparentZeros", "setNormalization",
                      "getNormalization", "setRebinThickness", "setRebinNumBins", "setRebinMode",
                      "setPeaksWorkspaces", "refreshRebin"]
    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)
    def __dir__(self):
        """Returns the list of attributes for this object. """
        # BUG FIX: slicer_methods is a list, not a callable - calling it
        # (`self.slicer_methods()`) raised TypeError. Return a copy so
        # callers cannot mutate the class-level list.
        return list(self.slicer_methods)
    def setPeaksWorkspaces(self, source):
        """Overlay a set of peaks workspaces on the slice view.

        Args:
            source: A workspace (group), a name, or a list of either.
        Returns:
            A ProxyCompositePeaksPresenter for the overlaid workspaces.
        Raises:
            ValueError: if no names are given, a workspace does not exist,
                or a workspace is not an IPeaksWorkspace.
        """
        workspace_names = getWorkspaceNames(source)
        if len(workspace_names) == 0:
            raise ValueError("No workspace names given to setPeaksWorkspaces")
        for name in workspace_names:
            if not mantid.api.mtd.doesExist(name):
                raise ValueError("%s does not exist in the workspace list" % name)
            if not isinstance(mantid.api.mtd[name], mantid.api.IPeaksWorkspace):
                raise ValueError("%s is not an IPeaksWorkspace" % name)
        return new_proxy(ProxyCompositePeaksPresenter, self._getHeldObject().setPeaksWorkspaces, workspace_names)
#-----------------------------------------------------------------------------
class LineViewerProxy(QtProxyObject):
    """Proxy for a C++ LineViewer widget.
    """
    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)
    def __dir__(self):
        """Returns the list of attributes for this object. """
        # Each method is listed once; the original listed "setThickness"
        # three times (one per C++ overload), which only duplicated dir()
        # output.
        return ["apply", "showPreview", "showFull", "setStartXY", "setEndXY", "setThickness",
                "setPlanarWidth", "getPlanarWidth", "setNumBins", "setFixedBinWidthMode",
                "getFixedBinWidth", "getFixedBinWidthMode", "getNumBins", "getBinWidth",
                "setPlotAxis", "getPlotAxis"]
#-----------------------------------------------------------------------------
class FitBrowserProxy(QtProxyObject):
    """Proxy for the FitPropertyBrowser object."""

    def __init__(self, toproxy):
        """Wrap the raw FitPropertyBrowser object.

        Args:
            toproxy: The raw object to wrap.
        """
        QtProxyObject.__init__(self, toproxy)
#-----------------------------------------------------------------------------
class TiledWindowProxy(QtProxyObject):
    """Proxy for the TiledWindow object."""

    def __init__(self, toproxy):
        QtProxyObject.__init__(self, toproxy)

    def addWidget(self, tile, row, col):
        """Add a new sub-window at a given position in the layout.

        The layout will re-shape itself if necessary to fit in the new tile.

        Args:
            tile :: An MdiSubWindow to add.
            row :: A row index at which to place the new tile.
            col :: A column index at which to place the new tile.
        """
        held = self._getHeldObject()
        threadsafe_call(held.addWidget, tile._getHeldObject(), row, col)

    def insertWidget(self, tile, row, col):
        """Insert a new sub-window at a given position in the layout.

        The widgets to the right and below the inserted tile are shifted
        towards the bottom of the window; if necessary a new row is
        appended. The number of columns doesn't change.

        Args:
            tile :: An MdiSubWindow to insert.
            row :: A row index at which to place the new tile.
            col :: A column index at which to place the new tile.
        """
        held = self._getHeldObject()
        threadsafe_call(held.insertWidget, tile._getHeldObject(), row, col)

    def getWidget(self, row, col):
        """Get a sub-window at a location in this TiledWindow.

        Args:
            row :: A row of a sub-window.
            col :: A column of a sub-window.
        Returns:
            An MDIWindow proxy around the sub-window.
        """
        held = self._getHeldObject()
        return MDIWindow(threadsafe_call(held.getWidget, row, col))

    def clear(self):
        """Remove all the widgets from this TiledWindow."""
        threadsafe_call(self._getHeldObject().clear)
def showHelpPage(page_name=None):
    """Show a page in the help system.

    Args:
        page_name: Optional name of the help page to display.
    """
    # The created window handle is managed by the InterfaceManager; no need
    # to keep the (previously unused) return value.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showHelpPage, page_name)
def showWikiPage(page_name=None):
    """Show a wiki page through the help system.

    Args:
        page_name: Optional name of the wiki page to display.
    """
    # Drop the unused `window =` binding; the InterfaceManager owns the window.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showWikiPage, page_name)
def showAlgorithmHelp(algorithm=None, version=-1):
    """Show an algorithm help page.

    Args:
        algorithm: Optional algorithm name.
        version: Algorithm version (-1 for the latest).
    """
    # Drop the unused `window =` binding; the InterfaceManager owns the window.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showAlgorithmHelp, algorithm, version)
def showConceptHelp(name=None):
    """Show a concept help page.

    Args:
        name: Optional concept name.
    """
    # Drop the unused `window =` binding; the InterfaceManager owns the window.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showConceptHelp, name)
def showFitFunctionHelp(name=None):
    """Show a fit function help page.

    Args:
        name: Optional fit function name.
    """
    # Drop the unused `window =` binding; the InterfaceManager owns the window.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showFitFunctionHelp, name)
def showCustomInterfaceHelp(name=None):
    """Show a custom interface help page.

    Args:
        name: Optional custom interface name.
    """
    # Drop the unused `window =` binding; the InterfaceManager owns the window.
    threadsafe_call(mantidqtpython.MantidQt.API.InterfaceManager().showCustomInterfaceHelp, name)
| gpl-3.0 |
mic-e/cythondemo | pxdgen.py | 1 | 9344 | """
Auto-generates PXD files from annotated C++ headers.
"""
import re
import os
from pygments.token import Token
from pygments.lexers import get_lexer_for_filename
class ParserError(Exception):
    """
    Represents a fatal parsing error in PXDGenerator.

    The message is rendered as "filename:lineno message".
    """
    def __init__(self, filename, lineno, message):
        location = "{}:{} {}".format(filename, lineno, message)
        super().__init__(location)
class PXDGenerator:
    """
    Represents, and performs, a single conversion of a C++ header file to a
    PXD file.

    The header is scanned for specially-marked comments ("pxd: ... :pxd");
    the annotations they contain are emitted, grouped by the C++ namespace
    in which they appeared.

    @param infilename:
        input (C++ header) file name. is opened and read.
    @param outfilename:
        output (pxd) file name. is opened and written.
    """
    def __init__(self, infilename, outfilename):
        self.infilename = infilename
        self.outfilename = outfilename
        # current parsing state (not valid until self.parse() is called)
        self.stack, self.lineno, self.annotations = None, None, None
    def parser_error(self, message, lineno=None):
        """
        Returns a ParserError object for this generator, at the current line.

        Note: the error is returned, not raised; callers decide whether to
        raise it.
        """
        if lineno is None:
            lineno = self.lineno
        return ParserError(self.infilename, lineno, message)
    def tokenize(self):
        """
        Tokenizes the input file.

        Yields (tokentype, val) pairs, where val is a string.

        The concatenation of all val strings is equal to the input file's
        content.
        """
        # contains all namespaces and other '{' tokens
        self.stack = []
        # current line number
        self.lineno = 1
        # we're using the pygments lexer (mainly because that was the first
        # google hit for 'python c++ lexer', and it's fairly awesome to use)
        lexer = get_lexer_for_filename('.cpp')
        with open(self.infilename) as infile:
            code = infile.read()
        for token, val in lexer.get_tokens(code):
            # yield every token (whitespace filtering happens in parse());
            # self.lineno is advanced only afterwards, so handlers invoked
            # while this generator is suspended see the token's first line.
            yield token, val
            self.lineno += val.count('\n')
    def handle_singleline_comment(self, val):
        """
        Breaks down a '//'-style single-line comment, and passes the result
        to handle_comment()

        @param val:
            the comment text, as string, including the '//'
        """
        try:
            val = re.match('^// (.*)$', val).group(1)
        except AttributeError as ex:
            # re.match returned None: the comment lacks the "// " prefix
            raise self.parser_error("invalid single-line comment") from ex
        self.handle_comment(val)
    def handle_multiline_comment(self, val):
        """
        Breaks down a '/* */'-style multi-line comment, and passes the result
        to handle_comment()

        @param val:
            the comment text, as string, including the '/*' and '*/'
        """
        try:
            val = re.match('^/\\*(.*)\\*/$', val, re.DOTALL).group(1)
        except AttributeError as ex:
            raise self.parser_error("invalid multi-line comment") from ex
        # for a comment '/* foo\n * bar\n */', val is now 'foo\n * bar\n '
        # however, we'd prefer ' * foo\n * bar'
        val = ' * ' + val.rstrip()
        # actually, we'd prefer [' * foo', ' * bar'].
        lines = val.split('\n')
        comment_lines = []
        for idx, line in enumerate(lines):
            try:
                # strip the leading ' *' decoration from each comment line
                line = re.match('^ \\*( (.*))?$', line).group(2) or ""
            except AttributeError as ex:
                raise self.parser_error("invalid multi-line comment line",
                                        idx + self.lineno) from ex
            # if comment is still empty, don't append anything
            if comment_lines or line.strip() != "":
                comment_lines.append(line)
        self.handle_comment('\n'.join(comment_lines).rstrip())
    def handle_comment(self, val):
        """
        Handles any comment, with its format characters removed,
        extracting the pxd annotation
        """
        # each annotation runs from 'pxd:' to ':pxd' (or end of comment)
        annotations = re.findall('pxd:\\s(.*?)(:pxd|$)', val, re.DOTALL)
        annotations = [annotation[0] for annotation in annotations]
        if not annotations:
            raise self.parser_error("comment contains no valid pxd annotation")
        for annotation in annotations:
            # remove empty lines at end
            annotation = annotation.rstrip()
            annotation_lines = annotation.split('\n')
            for idx, line in enumerate(annotation_lines):
                if line.strip() != "":
                    # we've found the first non-empty annotation line
                    self.add_annotation(annotation_lines[idx:])
                    break
            else:
                raise self.parser_error("pxd annotation is empty:\n" + val)
    def add_annotation(self, annotation_lines):
        """
        Adds a (current namespace, pxd annotation) tuple to self.annotations.
        """
        # '{' entries on the stack mark non-namespace braces; annotations
        # are only legal directly at namespace (or global) scope.
        if "{" in self.stack:
            raise self.parser_error("PXD annotation is brace-enclosed")
        elif not self.stack:
            namespace = None
        else:
            namespace = "::".join(self.stack)
        self.annotations.append((namespace, annotation_lines))
    def handle_token(self, token, val):
        """
        Handles one token while the parser is in its regular state.

        Returns the new state integer (0 = regular, 1 = after 'namespace').
        """
        # accept any token here
        if token == Token.Keyword and val == 'namespace':
            # advance to next state on 'namespace'
            return 1
        elif (token, val) == (Token.Punctuation, '{'):
            self.stack.append('{')
        elif (token, val) == (Token.Punctuation, '}'):
            try:
                self.stack.pop()
            except IndexError as ex:
                raise self.parser_error("unmatched '}'") from ex
        elif token == Token.Comment.Single and 'pxd:' in val:
            self.handle_singleline_comment(val)
        elif token == Token.Comment.Multiline and 'pxd:' in val:
            self.handle_multiline_comment(val)
        else:
            # we don't care about all those other tokens
            pass
        return 0
    def parse(self):
        """
        Parses the input file.

        Internally calls self.tokenize().

        Adds all found PXD annotations to self.annotations,
        together with info about the namespace in which they were encountered.
        """
        self.annotations = []
        # small state machine: 0 = regular, 1 = expect namespace identifier,
        # 2 = expect '{' that opens the namespace
        state = 0
        for token, val in self.tokenize():
            # ignore whitespaces
            if token == Token.Text and not val.strip():
                continue
            if state == 0:
                state = self.handle_token(token, val)
            elif state == 1:
                # we're inside a namespace definition; expect Token.Name
                if token != Token.Name:
                    raise self.parser_error(
                        "expected identifier after 'namespace'")
                state = 2
                self.stack.append(val)
            elif state == 2:
                # expect {
                if (token, val) != (Token.Punctuation, '{'):
                    raise self.parser_error("expected '{' after 'namespace " +
                                            self.stack[-1] + "'")
                state = 0
        if self.stack:
            raise self.parser_error("expected '}', but found EOF")
    def get_pxd_lines(self):
        """
        calls self.parse() and processes the pxd annotations to pxd code lines.
        """
        yield "# this PXD definition file was auto-generated from {}".format(
            self.infilename)
        self.parse()
        # namespace of the previous pxd annotation
        previous_namespace = None
        for namespace, annotation_lines in self.annotations:
            yield ""
            if namespace != previous_namespace:
                yield ""
            if namespace:
                prefix = " "
                if namespace != previous_namespace:
                    # open a new 'cdef extern' block for this namespace,
                    # referring to the header via a relative path
                    yield 'cdef extern from "{}" namespace "{}":'.format(
                        os.path.relpath(self.infilename,
                                        os.path.dirname(self.outfilename)),
                        namespace)
            else:
                prefix = ""
            for annotation in annotation_lines:
                yield prefix + annotation
            previous_namespace = namespace
    def generate(self):
        """
        reads the input file and writes the output file.
        on parsing failure, raises ParserError
        """
        try:
            with open(self.outfilename, 'w') as outfile:
                for line in self.get_pxd_lines():
                    outfile.write(line)
                    outfile.write('\n')
        except ParserError:
            # don't leave a half-written output file behind
            os.remove(self.outfilename)
            raise
def main():
    """Command-line entry point: generate a .pxd file from a C++ header."""
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument('input', help="input header file")
    cli.add_argument('--output', '-o', help="output filename", default=None)
    args = cli.parse_args()
    # default output: input name with its extension swapped for '.pxd'
    outfilename = args.output
    if outfilename is None:
        outfilename = os.path.splitext(args.input)[0] + '.pxd'
    PXDGenerator(args.input, outfilename).generate()
if __name__ == '__main__':
    main()
| gpl-3.0 |
bratsche/Neutron-Drive | google_appengine/lib/django_1_2/django/contrib/gis/gdal/prototypes/errcheck.py | 404 | 4207 | """
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import check_err, OGRException, SRSException
from django.contrib.gis.gdal.libgdal import lgdal
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
    "Returns the pointer argument's by-reference value."
    byref_arg = args[offset]
    return byref_arg._obj.value
def ptr_byref(args, offset=-1):
    "Returns the pointer argument passed in by-reference."
    byref_arg = args[offset]
    return byref_arg._obj
def check_bool(result, func, cargs):
    "Returns the boolean evaluation of the value."
    # `bool(result)` already yields True/False; the original if/else ladder
    # was redundant.
    return bool(result)
### String checking Routines ###
def check_const_string(result, func, cargs, offset=None):
    """
    Similar functionality to `check_string`, but does not free the pointer.

    With a (truthy) `offset`, `result` is treated as an OGR error code and
    the string is read from the by-reference argument at that offset;
    otherwise `result` itself is the string.
    """
    if not offset:
        return result
    check_err(result)
    ptr = ptr_byref(cargs, offset)
    return ptr.value
def check_string(result, func, cargs, offset=-1, str_result=False):
    """
    Checks the string output returned from the given function, and frees
    the string pointer allocated by OGR.  The `str_result` keyword
    may be used when the result is the string pointer, otherwise
    the OGR error code is assumed.  The `offset` keyword may be used
    to extract the string pointer passed in by-reference at the given
    slice offset in the function arguments.
    """
    if str_result:
        # The routine returned the string pointer directly.
        ptr = result
        s = string_at(result) if ptr else None
    else:
        # An error code was returned; the string comes back by reference.
        check_err(result)
        ptr = ptr_byref(cargs, offset)
        s = ptr.value
    # Correctly free the allocated memory behind the GDAL pointer
    # with the VSIFree routine.
    if ptr:
        lgdal.VSIFree(ptr)
    return s
### DataSource, Layer error-checking ###
### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
    "Checks a function that returns an OGR Envelope by reference."
    # The envelope is written into the by-reference argument; return it
    # directly (this inlines the one-line ptr_byref helper).
    return cargs[offset]._obj
### Geometry error-checking routines ###
def check_geom(result, func, cargs):
    "Checks a function that returns a geometry."
    # OGR_G_Clone may return an integer, even though the
    # restype is set to c_void_p
    # NOTE: `long` is the Python 2 integer type; this module predates
    # Python 3 support.
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    # A NULL pointer signals failure inside OGR.
    if not result:
        raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__)
    return result
def check_geom_offset(result, func, cargs, offset=-1):
    "Checks the geometry at the given offset in the C parameter list."
    # `result` holds the OGR error code; the geometry comes back by reference.
    check_err(result)
    geom = ptr_byref(cargs, offset=offset)
    return check_geom(geom, func, cargs)
### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
    "Checks a function that returns a spatial reference pointer."
    # Some routines return an integer address instead of a c_void_p
    # (`long` is the Python 2 integer type; this module predates Python 3).
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    if not result:
        raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
    return result
### Other error-checking routines ###
def check_arg_errcode(result, func, cargs):
    """
    The error code is returned in the last argument, by reference.
    Check its value with `check_err` before returning the result.
    """
    check_err(arg_byref(cargs))
    return result
def check_errcode(result, func, cargs):
    """
    Check the error code returned (c_int); raises via `check_err` on
    failure, returns nothing on success.
    """
    check_err(result)
    return
def check_pointer(result, func, cargs):
    "Makes sure the result pointer is valid."
    # Some routines return an integer address instead of a c_void_p
    # (`long` is the Python 2 integer type; this module predates Python 3).
    if isinstance(result, (int, long)):
        result = c_void_p(result)
    if bool(result):
        return result
    else:
        raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
    """
    This is for the OSRGet[Angular|Linear]Units functions, which
    require that the returned string pointer not be freed.  This
    returns both the double and string values.
    """
    dbl_val = result
    str_ptr = cargs[-1]._obj
    return dbl_val, str_ptr.value
| bsd-3-clause |
lmazuel/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/time_window_py3.py | 1 | 4310 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE(review): autogenerated by AutoRest (see file header) -- code is left
# byte-identical; manual edits would be lost on regeneration.
class TimeWindow(Model):
    """A specific date-time for the profile.

    All required parameters must be populated in order to send to Azure.

    :param time_zone: the timezone of the start and end times for the profile.
     Some examples of valid timezones are: Dateline Standard Time, UTC-11,
     Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time
     (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain
     Standard Time (Mexico), Mountain Standard Time, Central America Standard
     Time, Central Standard Time, Central Standard Time (Mexico), Canada
     Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US
     Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time,
     Atlantic Standard Time, Central Brazilian Standard Time, SA Western
     Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E.
     South America Standard Time, Argentina Standard Time, SA Eastern Standard
     Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard
     Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde
     Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich
     Standard Time, W. Europe Standard Time, Central Europe Standard Time,
     Romance Standard Time, Central European Standard Time, W. Central Africa
     Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard
     Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time,
     E. Europe Standard Time, South Africa Standard Time, FLE Standard Time,
     Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time,
     Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus
     Standard Time, Russian Standard Time, E. Africa Standard Time, Iran
     Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia
     Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus
     Standard Time, Afghanistan Standard Time, West Asia Standard Time,
     Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time,
     Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time,
     Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard
     Time, SE Asia Standard Time, North Asia Standard Time, China Standard
     Time, North Asia East Standard Time, Singapore Standard Time, W. Australia
     Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo
     Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia
     Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS
     Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time,
     Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10,
     Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard
     Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard
     Time, Samoa Standard Time, Line Islands Standard Time
    :type time_zone: str
    :param start: Required. the start time for the profile in ISO 8601 format.
    :type start: datetime
    :param end: Required. the end time for the profile in ISO 8601 format.
    :type end: datetime
    """

    # msrest validation rules: start/end must be set before serialization.
    _validation = {
        'start': {'required': True},
        'end': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and msrest
    # serialization types.
    _attribute_map = {
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'start': {'key': 'start', 'type': 'iso-8601'},
        'end': {'key': 'end', 'type': 'iso-8601'},
    }

    def __init__(self, *, start, end, time_zone: str=None, **kwargs) -> None:
        super(TimeWindow, self).__init__(**kwargs)
        self.time_zone = time_zone
        self.start = start
        self.end = end
| mit |
dmccloskey/SBaaS_isotopomer | SBaaS_isotopomer/stage01_isotopomer_peakSpectrum_execute.py | 1 | 35411 | #SBaaS
from .stage01_isotopomer_peakSpectrum_io import stage01_isotopomer_peakSpectrum_io
from .stage01_isotopomer_peakData_query import stage01_isotopomer_peakData_query
from .stage01_isotopomer_normalized_query import stage01_isotopomer_normalized_query
from SBaaS_LIMS.lims_msMethod_query import lims_msMethod_query
#Resources
from MDV_utilities.mass_isotopomer_distributions import mass_isotopomer_distributions
#Remove after refactor
from .stage01_isotopomer_peakSpectrum_postgresql_models import *
from .stage01_isotopomer_normalized_postgresql_models import *
import numpy
import re
from molmass.molmass import Formula
class stage01_isotopomer_peakSpectrum_execute(stage01_isotopomer_peakSpectrum_io,
                                              stage01_isotopomer_peakData_query,
                                              stage01_isotopomer_normalized_query,
                                              lims_msMethod_query):
    # Mixin-composed executor: builds, updates, filters and normalizes MS peak
    # spectra for stage-01 isotopomer analysis.  All methods write through the
    # shared SQLAlchemy session provided by the mixin base classes.

    def execute_buildSpectrumFromPeakData(self,experiment_id_I,ms_methodtype_I='isotopomer_13C',sample_name_abbreviations_I = None,met_ids_I = None):
        '''Build spectrum from raw peak data'''
        '''Assumptions:
        Only 1 precursor:spectrum per sample name and
        only 1 precursor:spectrum per dilution
        (i.e. the best/most representative precursor:spectrum was chosen from the
        available EPI scans and dilutions of that particular precursor)
        '''
        # NOTE(review): ms_methodtype_I is accepted but never used in this body.
        mids = mass_isotopomer_distributions();
        # extract out the peakSpectrum
        # get sample names for the experiment
        print('execute_buildSpectrumFromPeakData...')
        if sample_name_abbreviations_I:
            # restrict to the requested sample-name abbreviations
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];  # sample type, parallel to sample_names
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakData(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.get_sampleNames_experimentIDAndSampleType_peakData(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        # create database table
        for sn_cnt,sn in enumerate(sample_names):
            print('building spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,replicate_numbers = None,None,None;
            sample_name_abbreviation,time_point,replicate_numbers = self.get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakData(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.get_scanType_experimentIDAndSampleName_peakData(experiment_id_I,sn);
            for scantype in scan_type:
                print('building spectrum for scan type ' + scantype);
                # get met_id and precursor formula for each sample
                if met_ids_I:
                    met_id, precursor_formula = [], [];
                    for met in met_ids_I:
                        met_id_tmp, precursor_formula_tmp = [], []
                        met_id_tmp, precursor_formula_tmp = self.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanTypeAndMetID_peakData(experiment_id_I,sn,scantype,met);
                        met_id.extend(met_id_tmp);
                        precursor_formula.extend(precursor_formula_tmp);
                else:
                    met_id, precursor_formula = [], [];
                    met_id, precursor_formula = self.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakData(experiment_id_I,sn,scantype);
                for precursor_cnt, precursor in enumerate(precursor_formula):
                    print('building spectrum for met_id/precursor ' + met_id[precursor_cnt] + '/' + precursor);
                    # strip charge signs before computing the monoisotopic mass
                    precursor_str = re.sub('[+-]', '', precursor);
                    precursor_mass = Formula(precursor_str).isotope.mass
                    # get all product fragments for the met_id/precursor
                    precursor_formulas_monoisotopic, product_formulas = [], [];
                    precursor_formulas_monoisotopic, product_formulas = self.get_precursorAndProductFormulas_metID(met_id[precursor_cnt],'-','tuning');
                    product_formulas.append(precursor_formulas_monoisotopic[0]); # add precursor to list of fragments
                    # get peak data for the sample/met_id/precursor_formula
                    peak_data = [];
                    peak_data = self.get_data_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakData(experiment_id_I,sn,met_id[precursor_cnt],precursor,scantype);
                    # 0.3 = m/z window tolerance passed to the MDV extractor
                    #   -- TODO confirm against mass_isotopomer_distributions docs
                    peakSpectrum_measured,\
                        peakSpectrum_corrected, peakSpectrum_normalized = mids.extract_peakData_normMax(\
                        peak_data, product_formulas, 0.3, True);
                    peakSpectrum_stats,peakSpectrum_theoretical = mids.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_normalized
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                row1 = None;
                                row1 = data_stage01_isotopomer_peakSpectrum(experiment_id_I,sn,sample_name_abbreviation,
                                    sample_types_lst[sn_cnt],time_point,replicate_numbers,
                                    met_id[precursor_cnt],precursor,int(numpy.round(precursor_mass)),
                                    frag,int(numpy.round(k)),
                                    peakSpectrum_measured[frag][k],'cps',
                                    peakSpectrum_corrected[frag][k],'cps',
                                    peakSpectrum_normalized[frag][k],'normMax',
                                    v,peakSpectrum_stats[frag][k]['absDev'],scantype,True,None);
                                self.session.add(row1);
        self.session.commit();

    def execute_updatePeakSpectrum(self,experiment_id_I,sample_name_abbreviations_I = None):
        '''re-calculate intensity_normalized from intensity_corrected and used'''
        mids = mass_isotopomer_distributions();
        # extract out the peakSpectrum
        dataListUpdated = [];  # accumulated row dicts for a single bulk update
        # get sample names for the experiment
        print('execute_updatePeakSpectrum...')
        if sample_name_abbreviations_I:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakSpectrum(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.get_sampleNames_experimentIDAndSampleType_peakSpectrum(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        # create database table
        for sn_cnt,sn in enumerate(sample_names):
            print('updating peak spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,replicate_numbers = None,None,None;
            sample_name_abbreviation,time_point,replicate_numbers = self.get_sampleNameAbbreviationsAndTimePointAndReplicateNumber_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.get_scanType_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            for scantype in scan_type:
                print('building spectrum for scan type ' + scantype);
                # get met_id and precursor formula for each sample
                met_id, precursor_formula = [], [];
                met_id, precursor_formula = self.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakSpectrum(experiment_id_I,sn,scantype);
                for precursor_cnt, precursor in enumerate(precursor_formula):
                    print('updating peak spectrum for met_id/precursor ' + met_id[precursor_cnt] + '/' + precursor);
                    precursor_str = re.sub('[+-]', '', precursor);
                    precursor_mass = Formula(precursor_str).isotope.mass
                    # get all product fragments for the met_id/precursor
                    precursor_formulas_monoisotopic, product_formulas = [], [];
                    precursor_formulas_monoisotopic, product_formulas = self.get_precursorAndProductFormulas_metID(met_id[precursor_cnt],'-','tuning');
                    product_formulas.append(precursor_formulas_monoisotopic[0]); # add precursor to list of fragments
                    # get peak data for the sample/met_id/precursor_formula
                    peak_data = [];
                    peak_data = self.get_data_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met_id[precursor_cnt],precursor,scantype);
                    peakSpectrum_corrected, peakSpectrum_normalized = mids.extract_peakList_normMax(\
                        peak_data, product_formulas,True);
                    peakSpectrum_stats,peakSpectrum_theoretical = mids.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_peakSpectrum
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                dataListUpdated.append({'experiment_id':experiment_id_I,
                                        'sample_name':sn,
                                        'sample_name_abbreviation':sample_name_abbreviation,
                                        'sample_type':sample_types_lst[sn_cnt],
                                        'time_point':time_point,
                                        'replicate_number':replicate_numbers,
                                        'met_id':met_id[precursor_cnt],
                                        'precursor_formula':precursor,
                                        'precursor_mass':int(numpy.round(precursor_mass)),
                                        'product_formula':frag,
                                        'product_mass':int(numpy.round(k)),
                                        'intensity_corrected':peakSpectrum_corrected[frag][k],
                                        'intensity_corrected_units':'cps',
                                        'intensity_normalized':peakSpectrum_normalized[frag][k],
                                        'intensity_normalized_units':'normMax',
                                        'intensity_theoretical':v,
                                        'abs_devFromTheoretical':peakSpectrum_stats[frag][k]['absDev'],
                                        'scan_type':scantype});
        self.update_data_stage01_isotopomer_peakSpectrum(dataListUpdated);

    def execute_filterValidatedFragments(self,experiment_id_I):
        '''Filter fragments that have been validated by a U12C reference experiment'''
        from .stage01_isotopomer_peakSpectrum_dependencies import isotopomer_13C_fragments_validated
        print('filtering validated met/fragment pairs...')
        # flatten the {met_id: [product_formula, ...]} mapping into update rows
        dataUpdate_O = [];
        for k,v in isotopomer_13C_fragments_validated.items():
            for frag in v:
                dataUpdate_O.append({'experiment_id':experiment_id_I,'met_id':k,'product_formula':frag});
        self.update_validFragments_stage01_isotopomer_peakSpectrum(dataUpdate_O);

    def execute_normalizeSpectrumFromReference(self,experiment_id_I,sample_name_abbreviations_I = None, use_mrm_ref = True, met_ids_I = None):
        # 1. import used peak spectrum to normalized table after multiplying by measured
        #    scaling factor calculated from used MRM spectrum
        # 2. be sure that the MRMs in the normalized table have been finalized
        '''NOTES:
        cannot follow the forloop pattern used in buildSpectrumFromPeakData (i.e. starting with sample name)
        must use the forloop pattern similar to updateNormalizedSpectrum, but without a forloop for dilutions
        (i.e. time-point to sample name abbreviations to scan types to mets)
        buildSpectrumFromPeakData and updatePeakSpectrum methods process one product:spectrum from a single precursor at a time;
        each precursor:product:spectrum is associated with only one sample name
        However, because the entire range of precursor:product:spectrum for a given met can encompass multiple dilutions and therefore different
        sample names, a more generic approach must be used'''
        '''Assumptions:
        only a single precursor:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
        (i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
        '''
        mids = mass_isotopomer_distributions();
        # extract out the peakSpectrum
        print('execute_normalizeSpectrumFromReference...')
        # get time points
        time_points = [];
        time_points = self.get_timePoints_experimentID_peakSpectrum(experiment_id_I);
        for tp in time_points:
            print('normalizing peak spectrum from reference for time-point ' + tp);
            # get sample name abbreviations
            if sample_name_abbreviations_I:
                sample_name_abbreviations = [];
                sample_types = ['Unknown','QC'];
                sample_types_lst = [];
                for st in sample_types:
                    sample_name_abbreviations_tmp = [];
                    sample_name_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_peakSpectrum(experiment_id_I,st,tp);
                    sample_name_abbreviations.extend([sna for sna in sample_name_abbreviations_tmp if sna in sample_name_abbreviations_I]);
                    sample_types_lst.extend([st for sna in sample_name_abbreviations_tmp if sna in sample_name_abbreviations_I]);
            else:
                sample_name_abbreviations = [];
                sample_types = ['Unknown','QC'];
                sample_types_lst = [];
                for st in sample_types:
                    sample_name_abbreviations_tmp = [];
                    sample_name_abbreviations_tmp = self.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_peakSpectrum(experiment_id_I,st,tp);
                    sample_name_abbreviations.extend(sample_name_abbreviations_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_name_abbreviations_tmp))]);
            for sna_cnt,sna in enumerate(sample_name_abbreviations):
                print('normalizing peak spectrum from reference for sample name abbreviation ' + sna);
                # get scan types
                scan_type = [];
                scan_type = self.get_scanType_experimentIDAndTimePointSampleNameAbbreviation_peakSpectrum(experiment_id_I,tp,sna);
                for scantype in scan_type:
                    print('normalizing peak spectrum from reference for scan type ' + scantype);
                    # get replicates
                    replicate_numbers = [];
                    replicate_numbers = self.get_replicateNumber_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(experiment_id_I,tp,sna,scantype);
                    for rep in replicate_numbers:
                        print('normalizing peak spectrum from reference for replicate ' + str(rep));
                        # get other information about the sample for later use
                        sample_name, dilution = None,None;
                        sample_name,dilution = self.get_sampleNameAndDilution_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(\
                            experiment_id_I,tp,sna,scantype,rep);
                        # get met_id
                        if met_ids_I:
                            met_id = met_ids_I;
                        else:
                            med_id = [];  # NOTE(review): typo ("med_id") -- dead assignment, met_id is bound on the next line
                            met_id = self.get_metID_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicate_peakSpectrum(\
                                experiment_id_I,tp,sna,scantype,rep);
                        for met_cnt,met in enumerate(met_id):
                            print('normalizing peak spectrum from reference for met_id ' + met);
                            # get precursor formula and mass
                            precursor_formula, precursor_mass = [], [];
                            precursor_formula, precursor_mass = self.get_precursorFormulaAndMass_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetID_peakSpectrum(\
                                experiment_id_I,tp,sna,scantype,rep,met);
                            # per-precursor peak data and scaling factors, keyed by precursor formula
                            peak_data_all = {};
                            scaling_factors_all = {};
                            for precursor_cnt, precursor in enumerate(precursor_formula):
                                peak_data_all[precursor] = None;
                                scaling_factors_all[precursor] = None;
                                print('normalizing peak spectrum from reference for precursor ' + precursor);
                                precursor_str = re.sub('[+-]', '', precursor);
                                # get all product fragments for the met_id/precursor
                                product_formulas = [];
                                product_formulas = self.get_productFormulas_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetIDAndPrecursorFormula_peakSpectrum(\
                                    experiment_id_I,tp,sna,scantype,rep,met,precursor);
                                # get the m+0 precursor_formula
                                precursor_formula_monoisotopic = self.get_precursorFormula_metID(met,'-','tuning');
                                precursor_monoisotopic_str = re.sub('[+-]', '', precursor_formula_monoisotopic);
                                precursor_monoisotpoic_mass = int(numpy.round(Formula(precursor_monoisotopic_str).isotope.mass));
                                # get peakSpectrum data
                                peak_data = {};
                                peak_data = self.get_data_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetIDAndPrecursorFormula_peakSpectrum(\
                                    experiment_id_I,tp,sna,scantype,rep,met,precursor);
                                peak_data_all[precursor] = peak_data;
                                if scantype == 'ER':
                                    scaling_factors_all[precursor] = 1.0; # there is no need to scale ER or other precursor ion scans
                                else:
                                    if use_mrm_ref:
                                        # get reference MRM spectrum scaling factor for the sample
                                        #scaling_factor,scaling_factor_cv = None,None; # will need to incorporate propagation of error
                                        #scaling_factor,scaling_factor_cv = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Averages(experiment_id_I,sample_name_abbreviation,time_point,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                        scaling_factor = None; # does not require the propagation of error
                                        scaling_factor = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Normalized(experiment_id_I,sna,tp,rep,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                        if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                        else:
                                            scaling_factors_all[precursor] = 0.0;
                                            ## substitute with reference spectrum
                                            #refspec = mids.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                            #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                            #scaling_factors_all[precursor] = scaling_factor;
                                    else:
                                        # get reference ER spectrum scaling factor for the sample
                                        scaling_factor = None;
                                        scaling_factor = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndPrecursorFormulaAndMassAndScanType_peakSpectrum(experiment_id_I,sna,tp,rep,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'ER');
                                        if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                        else:
                                            scaling_factors_all[precursor] = 0.0;
                                            ## substitute with reference spectrum
                                            #refspec = mids.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                            #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                            #scaling_factors_all[precursor] = scaling_factor;
                            # normalize spectrum to reference MRM for each precursor (m+0,m+1,...)
                            peakSpectrum_normalized = mids.normalize_peakSpectrum_normMax(peak_data_all,scaling_factors_all);
                            peakSpectrum_stats,peakSpectrum_theoretical = mids.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                            # update data_stage01_isotopomer_peakSpectrum
                            for frag,spec in peakSpectrum_theoretical.items():
                                if spec:
                                    product_str = re.sub('[+-]', '', frag);
                                    product_mass = Formula(product_str).isotope.mass;
                                    for k,v in peakSpectrum_theoretical[frag].items():
                                        if k in peakSpectrum_normalized[frag]:
                                            row = None;
                                            row = data_stage01_isotopomer_normalized(experiment_id_I,sample_name,sna,sample_types_lst[sna_cnt],tp,dilution,rep,
                                                met,frag,int(numpy.round(k)),
                                                #None,'cps',None,'cps',
                                                None,'cps',peakSpectrum_normalized[frag][k],'normMax', #allows for spectrum updates
                                                peakSpectrum_normalized[frag][k],'normMax',
                                                v,peakSpectrum_stats[frag][k]['absDev'],scantype,True,None);
                                            self.session.add(row);
        self.session.commit();

    def execute_normalizeSpectrumFromReference_v1(self,experiment_id_I,sample_name_abbreviations_I = None,use_mrm_ref = True):
        # 1. import used peak spectrum to normalized table after multiplying by measured
        #    scaling factor calculated from used MRM spectrum
        # 2. be sure that the MRMs in the normalized table have been finalized
        '''NOTES: Broken for the following reason:
        cannot follow the forloop pattern used in buildSpectrumFromPeakData (i.e. starting with sample name)
        must use the forloop pattern used in updateNormalizedSpectrum (i.e. time-point to dilutions to sample name abbreviations to scan types to mets)
        buildSpectrumFromPeakData and updatePeakSpectrum methods process one product:spectrum from a single precursor at a time;
        each precursor:product:spectrum is associated with only one sample name
        However, because the entire range of precursor:product:spectrum for a given met can encompass multiple dilutions and therefore different
        sample names, a more generic approach must be used
        Please use current version'''
        # NOTE(review): kept for reference only; see docstring -- superseded by
        # execute_normalizeSpectrumFromReference above.
        mids = mass_isotopomer_distributions();
        # extract out the peakSpectrum
        # get sample name for the experiment
        print('execute_normalizeSpectrumFromReference...')
        if sample_name_abbreviations_I:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakSpectrum(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.get_sampleNames_experimentIDAndSampleType_peakSpectrum(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        for sn_cnt,sn in enumerate(sample_names):
            print('normalizing peak spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,dilution,replicate_numbers = None,None,None,None;
            sample_name_abbreviation,time_point,dilution,replicate_numbers = self.get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.get_scanType_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            for scantype in scan_type:
                print('normalizing spectrum for scan type ' + scantype);
                # get met_id
                med_id = [];  # NOTE(review): typo ("med_id") -- dead assignment, met_id is bound on the next line
                met_id = self.get_metID_experimentIDAndSampleNameAndScanType_peakSpectrum(experiment_id_I,sn,scantype);
                for met in met_id:
                    print('normalizing peak spectrum for met_id ' + met);
                    # get precursor formula and mass
                    precursor_formula, precursor_mass = [], [];
                    precursor_formula, precursor_mass = self.get_precursorFormulaAndMass_experimentIDAndSampleNameAndMetIDAndScanType_peakSpectrum(experiment_id_I,sn,met,scantype);
                    peak_data_all = {};
                    scaling_factors_all = {};
                    for precursor_cnt, precursor in enumerate(precursor_formula):
                        peak_data_all[precursor] = None;
                        scaling_factors_all[precursor] = None;
                        print('normalizing peak spectrum for precursor ' + precursor);
                        precursor_str = re.sub('[+-]', '', precursor);
                        # get all product fragments for the met_id/precursor
                        product_formulas = [];
                        product_formulas = self.get_productFormulas_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met,precursor,scantype);
                        # get the m+0 precursor_formula
                        precursor_formula_monoisotopic = self.get_precursorFormula_metID(met,'-','tuning');
                        precursor_monoisotopic_str = re.sub('[+-]', '', precursor_formula_monoisotopic);
                        precursor_monoisotpoic_mass = int(numpy.round(Formula(precursor_monoisotopic_str).isotope.mass));
                        # get peakSpectrum data
                        # NOTE(review): peak_data is never populated here (query call commented
                        # out below), which is part of why this version is broken.
                        peak_data = {};
                        #Change to sna+rep+timepoint:peak_data = self.get_normalizedIntensity_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met,precursor,scantype);
                        peak_data_all[precursor] = peak_data;
                        if scantype == 'ER':
                            scaling_factors_all[precursor] = 1.0; # there is no need to scale ER or other precursor ion scans
                        else:
                            if use_mrm_ref:
                                # get reference MRM spectrum scaling factor for the sample
                                #scaling_factor,scaling_factor_cv = None,None; # will need to incorporate propagation of error
                                #scaling_factor,scaling_factor_cv = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Averages(experiment_id_I,sample_name_abbreviation,time_point,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                scaling_factor = None; # does not require the propagation of error
                                scaling_factor = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Normalized(experiment_id_I,sample_name_abbreviation,time_point,replicate_numbers,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                else:
                                    scaling_factors_all[precursor] = 0.0;
                                    ## substitute with reference spectrum
                                    #refspec = mids.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                    #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                    #scaling_factors_all[precursor] = scaling_factor;
                            else:
                                # get reference ER spectrum scaling factor for the sample
                                scaling_factor = None;
                                scaling_factor = self.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndPrecursorFormulaAndMassAndScanType_peakSpectrum(experiment_id_I,sample_name_abbreviation,time_point,replicate_numbers,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'ER');
                                if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                else:
                                    scaling_factors_all[precursor] = 0.0;
                                    ## substitute with reference spectrum
                                    #refspec = mids.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                    #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                    #scaling_factors_all[precursor] = scaling_factor;
                    # normalize spectrum to reference MRM for each precursor (m+0,m+1,...)
                    peakSpectrum_normalized = mids.normalize_peakSpectrum_normMax(peak_data_all,scaling_factors_all);
                    peakSpectrum_stats,peakSpectrum_theoretical = mids.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_peakSpectrum
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                if k in peakSpectrum_normalized[frag]:
                                    row = None;
                                    row = data_stage01_isotopomer_normalized(experiment_id_I,sn,sample_name_abbreviation,sample_types_lst[sn_cnt],time_point,dilution,replicate_numbers,
                                        met,frag,int(numpy.round(k)),
                                        None,'cps',None,'cps',
                                        peakSpectrum_normalized[frag][k],'normMax',
                                        v,peakSpectrum_stats[frag][k]['absDev'],scantype,True);
                                    self.session.add(row);
        self.session.commit();
| mit |
nickgentoo/scikit-learn-graph | scripts/Online_PassiveAggressive_ReservoirHashKernels_notanhTABLES.py | 1 | 10510 | # -*- coding: utf-8 -*-
"""
python -m scripts/Online_PassiveAggressive_countmeansketch LMdata 3 1 a ODDST 0.01
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from copy import copy
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import sys
from skgraph.feature_extraction.graph.ODDSTVectorizer import ODDSTVectorizer
from skgraph.feature_extraction.graph.WLVectorizer import WLVectorizer
from sklearn.linear_model import PassiveAggressiveClassifier as PAC
from skgraph.datasets import load_graph_datasets
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.utils import compute_class_weight
from scipy.sparse import csr_matrix
from skgraph.utils.countminsketch_TABLESrandomprojectionNEWLinear import CountMinSketch
from itertools import izip
import time
if __name__=='__main__':
    # Online Passive-Aggressive learner over Count-Min-Sketch-hashed graph
    # kernel features (Python 2 script; see module docstring for usage).
    start_time = time.time()
    if len(sys.argv)<1:
        sys.exit("python ODDKernel_example.py dataset r l filename kernel C m seed")
    # command-line parameters
    dataset=sys.argv[1]
    max_radius=int(sys.argv[2])
    la=float(sys.argv[3])
    #hashs=int(sys.argv[3])
    njobs=1
    name=str(sys.argv[4])
    kernel=sys.argv[5]
    C=float(sys.argv[6])   # PA aggressiveness parameter
    m=int(sys.argv[7])     # sketch size (number of hashed features)
    rs=int(sys.argv[8])    # random seed for the sketch projections
    #lr=float(sys.argv[7])
    #FIXED PARAMETERS
    normalization=False
    #working with Chemical
    g_it=load_graph_datasets.dispatch(dataset)
    f=open(name,'w')
    #At this point, one_hot_encoding contains the encoding for each symbol in the alphabet
    if kernel=="WL":
        print "Lambda ignored"
        print "Using WL fast subtree kernel"
        Vectorizer=WLVectorizer(r=max_radius,normalization=normalization)
    elif kernel=="ODDST":
        print "Using ST kernel"
        Vectorizer=ODDSTVectorizer(r=max_radius,l=la,normalization=normalization)
    elif kernel=="NSPDK":
        print "Using NSPDK kernel, lambda parameter interpreted as d"
        # NOTE(review): NSPDKVectorizer is never imported in this module --
        # this branch would raise NameError; confirm the intended import.
        Vectorizer=NSPDKVectorizer(r=max_radius,d=int(la),normalization=normalization)
    else:
        print "Unrecognized kernel"
    #TODO the C parameter should probably be optimized
    #print zip(_letters, _one_hot)
    #exit()
    features=Vectorizer.transform(g_it.graphs) #Parallel ,njobs
    print "examples, features", features.shape
    features_time=time.time()
    print("Computed features in %s seconds ---" % (features_time - start_time))
    # running counters for the error statistics
    errors=0
    tp=0
    fp=0
    tn=0
    fn=0
    predictions=[0]*50
    correct=[0]*50
    #print ESN
    #netDataSet=[]
    #netTargetSet=[]
    #netKeyList=[]
    BERtotal=[]
    bintargets=[1,-1]
    #print features
    #print list_for_deep.keys()
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    part_plus=0
    part_minus=0
    sizes=[5000]*50
    # Count-Min-Sketch random projection of the feature space into m dims
    transformer=CountMinSketch(m,features.shape[1],rs)
    WCMS=np.zeros(shape=(m,1))  # weight vector in the sketched space
    cms_creation=0.0
    for i in xrange(features.shape[0]):
        time1=time.time()
        ex=features[i][0].T
        exCMS=transformer.transform(ex)
        #print "exCMS", type(exCMS), exCMS.shape
        target=g_it.target[i]
        #W=csr_matrix(ex)
        #dot=0.0
        module=np.dot(exCMS.T,exCMS)[0,0]  # squared norm of the sketched example
        #print "module", module
        time2=time.time()
        cms_creation+=time2 - time1
        dot=np.dot(WCMS.T,exCMS)
        #print "dot", dot
        #print "dot:", dot, "dotCMS:",dot1
        if (np.sign(dot) != target ):
            #print "error on example",i, "predicted:", dot, "correct:", target
            errors+=1
            if target==1:
                fn+=1
            else:
                fp+=1
        else:
            #print "correct classification", target
            if target==1:
                tp+=1
            else:
                tn+=1
        # class-balance coefficient weights the PA update by the opposite
        # class frequency seen so far
        if(target==1):
            coef=(part_minus+1.0)/(part_plus+part_minus+1.0)
            part_plus+=1
        else:
            coef=(part_plus+1.0)/(part_plus+part_minus+1.0)
            part_minus+=1
        # Passive-Aggressive step size (PA-I, capped by C)
        tao = min (C, max (0.0,( (1.0 - target*dot )*coef) / module ) );
        if (tao > 0.0):
            WCMS+=(exCMS*(tao*target))
#        for row,col in zip(rows,cols):
#            ((row,col), ex[row,col])
#            #print col, ex[row,col]
#            WCMS.add(col,target*tao*ex[row,col])
        #print "Correct prediction example",i, "pred", score, "target",target
        if i%50==0 and i!=0:
            #output performance statistics every 50 examples
            if (tn+fp) > 0:
                pos_part= float(fp) / (tn+fp)
            else:
                pos_part=0
            if (tp+fn) > 0:
                neg_part=float(fn) / (tp+fn)
            else:
                neg_part=0
            BER = 0.5 * ( pos_part + neg_part)
            print "1-BER Window esempio ",i, (1.0 - BER)
            f.write("1-BER Window esempio "+str(i)+" "+str(1.0 - BER)+"\n")
            #print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
            BERtotal.append(1.0 - BER)
            tp = 0
            fp = 0
            fn = 0
            tn = 0
            part_plus=0
            part_minus=0
    end_time=time.time()
    print("Learning phase time %s seconds ---" % (end_time - features_time )) #- cms_creation
    print("Total time %s seconds ---" % (end_time - start_time))
    print "BER AVG", str(np.average(BERtotal)),"std", np.std(BERtotal)
f.write("BER AVG "+ str(np.average(BERtotal))+" std "+str(np.std(BERtotal))+"\n")
f.close()
transformer.removetmp()
#print "N_features", ex.shape
#generate explicit W from CountMeanSketch
#print W
#raw_input("W (output)")
#==============================================================================
#
# tao = /*(double)labels->get_label(idx_a) **/ min (C, max (0.0,(1.0 - (((double)labels->get_label(idx_a))*(classe_mod) )) * c_plus ) / modulo_test);
#
# #W=W_old #dump line
#
#
# #set the weights of PA to the predicted values
# PassiveAggressive.coef_=W
# pred=PassiveAggressive.predict(ex)
#
# score=PassiveAggressive.decision_function(ex)
#
# bintargets.append(target)
# if pred!=target:
# errors+=1
# print "Error",errors," on example",i, "pred", score, "target",target
# if target==1:
# fn+=1
# else:
# fp+=1
#
# else:
# if target==1:
# tp+=1
# else:
# tn+=1
# #print "Correct prediction example",i, "pred", score, "target",target
#
# else:
# #first example is always an error!
# pred=0
# score=0
# errors+=1
# print "Error",errors," on example",i
# if g_it.target[i]==1:
# fn+=1
# else:
# fp+=1
# #print i
# if i%50==0 and i!=0:
# #output performance statistics every 50 examples
# if (tn+fp) > 0:
# pos_part= float(fp) / (tn+fp)
# else:
# pos_part=0
# if (tp+fn) > 0:
# neg_part=float(fn) / (tp+fn)
# else:
# neg_part=0
# BER = 0.5 * ( pos_part + neg_part)
# print "1-BER Window esempio ",i, (1.0 - BER)
# print>>f,"1-BER Window esempio "+str(i)+" "+str(1.0 - BER)
# BERtotal.append(1.0 - BER)
# tp = 0
# fp = 0
# fn = 0
# tn = 0
# bintargets=[1,-1]
# #print features[0][i]
# #print features[0][i].shape
# #f=features[0][i,:]
# #print f.shape
# #print f.shape
# #print g_it.target[i]
# #third parameter is compulsory just for the first call
# print "prediction", pred, score
# #print "intecept",PassiveAggressive.intercept_
# #raw_input()
# if abs(score)<1.0 or pred!=g_it.target[i]:
#
# ClassWeight=compute_class_weight('auto',np.asarray([1,-1]),bintargets)
# #print "class weights", {1:ClassWeight[0],-1:ClassWeight[1]}
# PassiveAggressive.class_weight={1:ClassWeight[0],-1:ClassWeight[1]}
#
# PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# #PassiveAggressive.partial_fit(ex,np.array([g_it.target[i]]),np.unique(g_it.target))
# W_old=PassiveAggressive.coef_
#
#
# #ESN target---#
# netTargetSet=[]
# for key,rowDict in list_for_deep[i].iteritems():
#
#
# target=np.asarray( [np.asarray([W_old[0,key]])]*len(rowDict))
#
#
# netTargetSet.append(target)
#
#
#
#
# #------------ESN TargetSetset--------------------#
# # ESN Training
#
# #for ftDataset,ftTargetSet in zip(netDataSet,netTargetSet):
# #print "Input"
# #print netDataSet
# #raw_input("Output")
# #print netTargetSet
# #raw_input("Target")
# model.OnlineTrain(netDataSet,netTargetSet,lr)
# #raw_input("TR")
# #calcolo statistiche
#
# print "BER AVG", sum(BERtotal) / float(len(BERtotal))
# print>>f,"BER AVG "+str(sum(BERtotal) / float(len(BERtotal)))
# f.close()
#==============================================================================
| gpl-3.0 |
bradleypj823/swift | swift/common/manager.py | 3 | 24766 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
from swift.common.exceptions import InvalidPidFileException
# Filesystem locations swift-init relies on.
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
PROC_DIR = '/proc'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
               'container-replicator', 'container-reconciler',
               'container-server', 'container-sync',
               'container-updater', 'object-auditor', 'object-server',
               'object-expirer', 'object-replicator',
               'object-reconstructor', 'object-updater',
               'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
                'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
# Servers that support graceful shutdown via SIGHUP (see kill_running_pids).
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
# Servers that accept the 'once' argument (run a single pass then exit).
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer', 'container-reconciler']
KILL_WAIT = 15  # seconds to wait for servers to die (by default)
WARNING_WAIT = 3  # seconds to wait after message that may just be a warning
# Resource-limit targets applied by setup_env().
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2  # 2 GB
MAX_PROCS = 8192  # workers * disks * threads_per_disk, can get high
def setup_env():
    """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
    """
    # (rlimit, target value, warning shown when the limit cannot be raised);
    # raising limits fails with ValueError when running unprivileged.
    limit_requests = (
        (resource.RLIMIT_NOFILE, MAX_DESCRIPTORS,
         "WARNING: Unable to modify file descriptor limit. "
         "Running as non-root?"),
        (resource.RLIMIT_DATA, MAX_MEMORY,
         "WARNING: Unable to modify memory limit. "
         "Running as non-root?"),
        (resource.RLIMIT_NPROC, MAX_PROCS,
         "WARNING: Unable to modify max process limit. "
         "Running as non-root?"),
    )
    for rlimit, target, warning in limit_requests:
        try:
            resource.setrlimit(rlimit, (target, target))
        except ValueError:
            print(_(warning))
    # Set PYTHON_EGG_CACHE if it isn't already set
    os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def command(func):
    """
    Decorator to declare which methods are accessible as commands, commands
    always return 1 or 0, where 0 should indicate success.
    :param func: function to make public
    """
    # Mark the original callable; functools.wraps copies __dict__ onto the
    # wrapper, so the marker is visible on the decorated method as well.
    func.publicly_accessible = True

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        outcome = func(*args, **kwargs)
        # Collapse any truthy/falsy return into a 1/0 exit status.
        return int(bool(outcome))
    return wrapped
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Monitor a collection of server pids yielding back those pids that
    aren't responding to signals.
    :param server_pids: a dict, lists of pids [int,...] keyed on
                        Server objects
    :param interval: seconds to keep polling before giving up
    """
    status = {}
    start = time.time()
    end = start + interval
    server_pids = dict(server_pids)  # make a copy
    while True:
        for server, pids in server_pids.items():
            for pid in pids:
                try:
                    # let pid stop if it wants to
                    os.waitpid(pid, os.WNOHANG)
                except OSError as e:
                    if e.errno not in (errno.ECHILD, errno.ESRCH):
                        raise  # else no such child/process
            # check running pids for server
            status[server] = server.get_running_pids(**kwargs)
            for pid in pids:
                # original pids no longer in running pids!
                if pid not in status[server]:
                    yield server, pid
            # update active pids list using running_pids
            server_pids[server] = status[server]
        if not [p for server, pids in status.items() for p in pids]:
            # no more running pids
            break
        if time.time() > end:
            # polling window exhausted; some pids may still be alive
            break
        else:
            time.sleep(0.1)
def safe_kill(pid, sig, name):
    """Send signal to process and check process name

    : param pid: process id
    : param sig: signal to send
    : param name: name to ensure target process

    :raises InvalidPidFileException: if *name* does not appear in the
        process's cmdline (i.e. the pid has been recycled by an unrelated
        process); only checked for the SIG_DFL "noop" probe.
    """
    # check process name for SIG_DFL
    if sig == signal.SIG_DFL:
        try:
            proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
            if os.path.exists(proc_file):
                with open(proc_file, 'r') as fd:
                    if name not in fd.read():
                        # unknown process is using the pid
                        raise InvalidPidFileException()
        except IOError:
            # /proc entry vanished between the exists() check and the read;
            # fall through and let os.kill report ESRCH if the pid is gone
            pass
    os.kill(pid, sig)
class UnknownCommandError(Exception):
    """Raised by Manager.get_command when the named attribute does not exist
    or is not decorated as a publicly accessible command."""
    pass
class Manager(object):
    """Main class for performing commands on groups of servers.

    :param servers: list of server names as strings; the aliases 'all',
        'main' and 'rest' expand to the corresponding server groups, and
        names may contain ``*`` globs matched against ALL_SERVERS
    """
    def __init__(self, servers, run_dir=RUN_DIR):
        self.server_names = set()
        for server in servers:
            if server == 'all':
                self.server_names.update(ALL_SERVERS)
            elif server == 'main':
                self.server_names.update(MAIN_SERVERS)
            elif server == 'rest':
                self.server_names.update(REST_SERVERS)
            elif '*' in server:
                # convert glob to regex
                self.server_names.update([
                    s for s in ALL_SERVERS if
                    re.match(server.replace('*', '.*'), s)])
            else:
                self.server_names.add(server)
        self.servers = set()
        for name in self.server_names:
            self.servers.add(Server(name, run_dir))
    def __iter__(self):
        return iter(self.servers)
    @command
    def status(self, **kwargs):
        """display status of tracked pids for server
        """
        # sum of per-server statuses; @command collapses it to 1/0
        status = 0
        for server in self.servers:
            status += server.status(**kwargs)
        return status
    @command
    def start(self, **kwargs):
        """starts a server
        """
        setup_env()
        status = 0
        for server in self.servers:
            server.launch(**kwargs)
        if not kwargs.get('daemon', True):
            # interactive mode: block until the servers exit (or ^C)
            for server in self.servers:
                try:
                    status += server.interact(**kwargs)
                except KeyboardInterrupt:
                    print(_('\nuser quit'))
                    self.stop(**kwargs)
                    break
        elif kwargs.get('wait', True):
            for server in self.servers:
                status += server.wait(**kwargs)
        return status
    @command
    def no_wait(self, **kwargs):
        """spawn server and return immediately
        """
        kwargs['wait'] = False
        return self.start(**kwargs)
    @command
    def no_daemon(self, **kwargs):
        """start a server interactively
        """
        kwargs['daemon'] = False
        return self.start(**kwargs)
    @command
    def once(self, **kwargs):
        """start server and run one pass on supporting daemons
        """
        kwargs['once'] = True
        return self.start(**kwargs)
    @command
    def stop(self, **kwargs):
        """stops a server
        """
        server_pids = {}
        for server in self.servers:
            signaled_pids = server.stop(**kwargs)
            if not signaled_pids:
                print(_('No %s running') % server)
            else:
                server_pids[server] = signaled_pids
        # all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
        signaled_pids = [p for server, pids in server_pids.items()
                         for p in pids]
        # keep track of the pids yielded back as killed for all servers
        killed_pids = set()
        kill_wait = kwargs.get('kill_wait', KILL_WAIT)
        for server, killed_pid in watch_server_pids(server_pids,
                                                    interval=kill_wait,
                                                    **kwargs):
            print(_("%s (%s) appears to have stopped") % (server, killed_pid))
            killed_pids.add(killed_pid)
            if not killed_pids.symmetric_difference(signaled_pids):
                # all processes have been stopped
                return 0
        # reached interval n watch_pids w/o killing all servers
        for server, pids in server_pids.items():
            if not killed_pids.issuperset(pids):
                # some pids of this server were not killed
                print(_('Waited %s seconds for %s to die; giving up') % (
                    kill_wait, server))
        return 1
    @command
    def kill(self, **kwargs):
        """stop a server (no error if not running)
        """
        status = self.stop(**kwargs)
        kwargs['quiet'] = True
        if status and not self.status(**kwargs):
            # only exit error if the server is still running
            return status
        return 0
    @command
    def shutdown(self, **kwargs):
        """allow current requests to finish on supporting servers
        """
        kwargs['graceful'] = True
        status = 0
        status += self.stop(**kwargs)
        return status
    @command
    def restart(self, **kwargs):
        """stops then restarts server
        """
        status = 0
        status += self.stop(**kwargs)
        status += self.start(**kwargs)
        return status
    @command
    def reload(self, **kwargs):
        """graceful shutdown then restart on supporting servers
        """
        kwargs['graceful'] = True
        status = 0
        # restart each server type one at a time to limit downtime
        for server in self.server_names:
            m = Manager([server])
            status += m.stop(**kwargs)
            status += m.start(**kwargs)
        return status
    @command
    def force_reload(self, **kwargs):
        """alias for reload
        """
        return self.reload(**kwargs)
    def get_command(self, cmd):
        """Find and return the decorated method named like cmd

        :param cmd: the command to get, a string, if not found raises
                    UnknownCommandError
        """
        cmd = cmd.lower().replace('-', '_')
        try:
            f = getattr(self, cmd)
        except AttributeError:
            raise UnknownCommandError(cmd)
        if not hasattr(f, 'publicly_accessible'):
            # the attribute exists but was not declared with @command
            raise UnknownCommandError(cmd)
        return f
    @classmethod
    def list_commands(cls):
        """Get all publicly accessible commands

        :returns: a list of string tuples (cmd, help), the method names who are
                  decorated as commands
        """
        get_method = lambda cmd: getattr(cls, cmd)
        return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
                       for x in dir(cls) if
                       getattr(get_method(x), 'publicly_accessible', False)])
    def run_command(self, cmd, **kwargs):
        """Find the named command and run it

        :param cmd: the command name to run
        """
        f = self.get_command(cmd)
        return f(**kwargs)
class Server(object):
    """Manage operations on a server or group of servers of similar type

    :param server: name of server; an optional ``.suffix`` selects a specific
        named config (e.g. ``object-server.1``), and a bare type name
        (e.g. ``object``) is expanded to ``object-server``
    """
    def __init__(self, server, run_dir=RUN_DIR):
        self.server = server.lower()
        if '.' in self.server:
            self.server, self.conf = self.server.rsplit('.', 1)
        else:
            self.conf = None
        if '-' not in self.server:
            self.server = '%s-server' % self.server
        self.type = self.server.rsplit('-', 1)[0]
        self.cmd = 'swift-%s' % self.server
        self.procs = []
        self.run_dir = run_dir
    def __str__(self):
        return self.server
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
    def __hash__(self):
        # hash/eq both key on the server name so Server objects can be
        # deduplicated in sets (see Manager.__init__)
        return hash(str(self))
    def __eq__(self, other):
        try:
            return self.server == other.server
        except AttributeError:
            return False
    def get_pid_file_name(self, conf_file):
        """Translate conf_file to a corresponding pid_file

        :param conf_file: an conf_file for this server, a string
        :returns: the pid_file for this conf_file
        """
        return conf_file.replace(
            os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
                '%s-server' % self.type, self.server, 1).replace(
                    '.conf', '.pid', 1)
    def get_conf_file_name(self, pid_file):
        """Translate pid_file to a corresponding conf_file

        :param pid_file: a pid_file for this server, a string
        :returns: the conf_file for this pid_file
        """
        if self.server in STANDALONE_SERVERS:
            # standalone servers use their own conf file, not the
            # type-server.conf, so no type substitution is needed
            return pid_file.replace(
                os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
                    '.pid', '.conf', 1)
        else:
            return pid_file.replace(
                os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
                    self.server, '%s-server' % self.type, 1).replace(
                        '.pid', '.conf', 1)
    def conf_files(self, **kwargs):
        """Get conf files for this server

        :param: number, if supplied will only lookup the nth server
        :returns: list of conf files
        """
        if self.server in STANDALONE_SERVERS:
            server_search = self.server
        else:
            server_search = "%s-server" % self.type
        if self.conf is not None:
            found_conf_files = search_tree(SWIFT_DIR, server_search,
                                           self.conf + '.conf',
                                           dir_ext=self.conf + '.conf.d')
        else:
            found_conf_files = search_tree(SWIFT_DIR, server_search + '*',
                                           '.conf', dir_ext='.conf.d')
        number = kwargs.get('number')
        if number:
            # -n is 1-based on the command line
            try:
                conf_files = [found_conf_files[number - 1]]
            except IndexError:
                conf_files = []
        else:
            conf_files = found_conf_files
        if not conf_files:
            # maybe there's a config file(s) out there, but I couldn't find it!
            if not kwargs.get('quiet'):
                if number:
                    print(_('Unable to locate config number %s for %s' % (
                        number, self.server)))
                else:
                    print(_('Unable to locate config for %s' % (self.server)))
            if kwargs.get('verbose') and not kwargs.get('quiet'):
                if found_conf_files:
                    print(_('Found configs:'))
                for i, conf_file in enumerate(found_conf_files):
                    print('  %d) %s' % (i + 1, conf_file))
        return conf_files
    def pid_files(self, **kwargs):
        """Get pid files for this server

        :param: number, if supplied will only lookup the nth server
        :returns: list of pid files
        """
        if self.conf is not None:
            pid_files = search_tree(self.run_dir, '%s*' % self.server,
                                    exts=[self.conf + '.pid',
                                          self.conf + '.pid.d'])
        else:
            pid_files = search_tree(self.run_dir, '%s*' % self.server)
        if kwargs.get('number', 0):
            conf_files = self.conf_files(**kwargs)
            # filter pid_files to match the index of numbered conf_file
            pid_files = [pid_file for pid_file in pid_files if
                         self.get_conf_file_name(pid_file) in conf_files]
        return pid_files
    def iter_pid_files(self, **kwargs):
        """Generator, yields (pid_file, pids)
        """
        for pid_file in self.pid_files(**kwargs):
            try:
                pid = int(open(pid_file).read().strip())
            except ValueError:
                # unparsable pid file contents; yield None so callers can
                # clean the file up (see signal_pids)
                pid = None
            yield pid_file, pid
    def signal_pids(self, sig, **kwargs):
        """Send a signal to pids for this server

        :param sig: signal to send
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        pids = {}
        for pid_file, pid in self.iter_pid_files(**kwargs):
            if not pid:  # Catches None and 0
                print (_('Removing pid file %s with invalid pid') % pid_file)
                remove_file(pid_file)
                continue
            try:
                if sig != signal.SIG_DFL:
                    print(_('Signal %s  pid: %s  signal: %s') % (self.server,
                                                                 pid, sig))
                safe_kill(pid, sig, 'swift-%s' % self.server)
            except InvalidPidFileException as e:
                if kwargs.get('verbose'):
                    print(_('Removing pid file %s with wrong pid %d') % (
                        pid_file, pid))
                remove_file(pid_file)
            except OSError as e:
                if e.errno == errno.ESRCH:
                    # pid does not exist
                    if kwargs.get('verbose'):
                        print(_("Removing stale pid file %s") % pid_file)
                    remove_file(pid_file)
                elif e.errno == errno.EPERM:
                    print(_("No permission to signal PID %d") % pid)
            else:
                # process exists
                pids[pid] = pid_file
        return pids
    def get_running_pids(self, **kwargs):
        """Get running pids

        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        return self.signal_pids(signal.SIG_DFL, **kwargs)  # send noop
    def kill_running_pids(self, **kwargs):
        """Kill running pids

        :param graceful: if True, attempt SIGHUP on supporting servers
        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        graceful = kwargs.get('graceful')
        if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
            sig = signal.SIGHUP
        else:
            sig = signal.SIGTERM
        return self.signal_pids(sig, **kwargs)
    def status(self, pids=None, **kwargs):
        """Display status of server

        :param: pids, if not supplied pids will be populated automatically
        :param: number, if supplied will only lookup the nth server
        :returns: 1 if server is not running, 0 otherwise
        """
        if pids is None:
            pids = self.get_running_pids(**kwargs)
        if not pids:
            number = kwargs.get('number', 0)
            if number:
                kwargs['quiet'] = True
                conf_files = self.conf_files(**kwargs)
                if conf_files:
                    print(_("%s #%d not running (%s)") % (self.server, number,
                                                          conf_files[0]))
            else:
                print(_("No %s running") % self.server)
            return 1
        for pid, pid_file in pids.items():
            conf_file = self.get_conf_file_name(pid_file)
            print(_("%s running (%s - %s)") % (self.server, pid, conf_file))
        return 0
    def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
        """Launch a subprocess for this server.

        :param conf_file: path to conf_file to use as first arg
        :param once: boolean, add once argument to command
        :param wait: boolean, if true capture stdout with a pipe
        :param daemon: boolean, if false ask server to log to console
        :returns : the pid of the spawned process
        """
        args = [self.cmd, conf_file]
        if once:
            args.append('once')
        if not daemon:
            # ask the server to log to console
            args.append('verbose')
        # figure out what we're going to do with stdio
        if not daemon:
            # do nothing, this process is open until the spawns close anyway
            re_out = None
            re_err = None
        else:
            re_err = subprocess.STDOUT
            if wait:
                # we're going to need to block on this...
                re_out = subprocess.PIPE
            else:
                re_out = open(os.devnull, 'w+b')
        proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
        # record the child's pid so later stop/status calls can find it
        pid_file = self.get_pid_file_name(conf_file)
        write_file(pid_file, proc.pid)
        self.procs.append(proc)
        return proc.pid
    def wait(self, **kwargs):
        """
        wait on spawned procs to start
        """
        status = 0
        for proc in self.procs:
            # wait for process to close its stdout
            output = proc.stdout.read()
            if kwargs.get('once', False):
                # if you don't want once to wait you can send it to the
                # background on the command line, I generally just run with
                # no-daemon anyway, but this is quieter
                proc.wait()
            if output:
                print(output)
                start = time.time()
                # wait for process to die (output may just be a warning)
                while time.time() - start < WARNING_WAIT:
                    time.sleep(0.1)
                    if proc.poll() is not None:
                        status += proc.returncode
                        break
        return status
    def interact(self, **kwargs):
        """
        wait on spawned procs to terminate
        """
        status = 0
        for proc in self.procs:
            # wait for process to terminate
            proc.communicate()
            if proc.returncode:
                status += 1
        return status
    def launch(self, **kwargs):
        """
        Collect conf files and attempt to spawn the processes for this server
        """
        conf_files = self.conf_files(**kwargs)
        if not conf_files:
            return {}
        pids = self.get_running_pids(**kwargs)
        already_started = False
        for pid, pid_file in pids.items():
            conf_file = self.get_conf_file_name(pid_file)
            # for legacy compat you can't start other servers if one server is
            # already running (unless -n specifies which one you want), this
            # restriction could potentially be lifted, and launch could start
            # any unstarted instances
            if conf_file in conf_files:
                already_started = True
                print(_("%s running (%s - %s)") %
                      (self.server, pid, conf_file))
            elif not kwargs.get('number', 0):
                already_started = True
                print(_("%s running (%s - %s)") % (self.server, pid, pid_file))
        if already_started:
            print(_("%s already started...") % self.server)
            return {}
        if self.server not in START_ONCE_SERVERS:
            # only the background daemons support 'once'; drop it for the
            # long-running wsgi servers
            kwargs['once'] = False
        pids = {}
        for conf_file in conf_files:
            if kwargs.get('once'):
                msg = _('Running %s once') % self.server
            else:
                msg = _('Starting %s') % self.server
            print('%s...(%s)' % (msg, conf_file))
            try:
                pid = self.spawn(conf_file, **kwargs)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # TODO(clayg): should I check if self.cmd exists earlier?
                    print(_("%s does not exist") % self.cmd)
                    break
                else:
                    raise
            pids[pid] = conf_file
        return pids
    def stop(self, **kwargs):
        """Send stop signals to pids for this server

        :returns: a dict mapping pids (ints) to pid_files (paths)
        """
        return self.kill_running_pids(**kwargs)
| apache-2.0 |
donaghhorgan/rhythmbox-plugins-rating-filters | release/3.0/RatingFilters.py | 3 | 15274 | # -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# RatingFilters.py
#
# Rating filters for the library browser.
# Copyright (C) 2013 Donagh Horgan <donagh.horgan@gmail.com>
#
# Preferences class code adapted from Magnatune plugin and CoverArt plugin
# Copyright (C) 2006 Adam Zimmerman <adam_zimmerman@sfu.ca>
# Copyright (C) 2006 James Livingston <doclivingston@gmail.com>
# Copyright (C) 2012 - fossfreedom
# Copyright (C) 2012 - Agustin Carrasco
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from gi.repository import Peas
from gi.repository import RB
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import Gio
from gi.repository import PeasGtk
import rb
class RatingFiltersPlugin (GObject.Object, Peas.Activatable):
'''
Main class for the RatingFilters plugin. Contains functions for setting
up the UI, callbacks for user actions, and functions for filtering query
models and refreshing the display.
'''
object = GObject.property (type = GObject.Object)
def __init__(self):
GObject.Object.__init__(self)
def log(self, function_name, message, error=False):
'''
Generic function for logging - will save Python 3 related slip ups.
'''
if error:
message_type = 'ERROR'
else:
message_type = 'DEBUG'
print(function_name + ': ' + message_type + ': ' + message)
def do_activate(self):
'''
Creates and links UI elements and creates class variables.
'''
self.log(self.do_activate.__name__, 'Activating plugin...')
self.settings = Gio.Settings(
'org.gnome.rhythmbox.plugins.rating_filters'
)
self.settings.connect(
'changed::favourites-threshold',
self.on_favourites_threshold_changed
)
app = Gio.Application.get_default()
self.app_id = 'rating-filters'
self.filter_names = ['All Ratings', 'Favourites', 'Unrated']
self.target_values = {
'All Ratings': GLib.Variant.new_string('rating-filters-all-ratings'),
'Favourites': GLib.Variant.new_string('rating-filters-favourites'),
'Unrated': GLib.Variant.new_string('rating-filters-unrated')
}
self.locations = ['library-toolbar', 'playlist-toolbar']
self.visited_pages = {}
self.active_filter = {}
action_name = 'rating-filters'
self.action = Gio.SimpleAction.new_stateful(
action_name, GLib.VariantType.new('s'),
self.target_values['All Ratings']
)
self.action.connect("activate", self.filter_change_cb)
app.add_action(self.action)
menu_item = Gio.MenuItem()
section = Gio.Menu()
menu = Gio.Menu()
toolbar_item = Gio.MenuItem()
for filter_name in self.filter_names:
menu_item.set_label(filter_name)
menu_item.set_action_and_target_value(
'app.' + action_name, self.target_values[filter_name]
)
section.append_item(menu_item)
menu.append_section(None, section)
toolbar_item.set_label('Filter')
toolbar_item.set_submenu(menu)
for location in self.locations:
app.add_plugin_menu_item(location, self.app_id, toolbar_item)
def do_deactivate(self):
'''
Unlinks UI elements and resets entry views.
'''
self.log(self.do_deactivate.__name__, 'Deactivating plugin...')
for page in self.visited_pages:
[_, query_models, t] = self.visited_pages[page]
self.visited_pages[page] = ['All Ratings', query_models, t]
self.refresh(page)
app = Gio.Application.get_default()
for location in self.locations:
app.remove_plugin_menu_item(location, self.app_id)
def target_value_to_filter_name(self, target_value):
'''
Converts target values to filter names.
'''
filter_names = dict(zip(self.target_values.values(), self.target_values.keys()))
return filter_names[target_value]
def filter_change_cb(self, action, current):
'''
Called when the filter state on a page is changed. Sets the new
state and triggers a refresh of the entry view.
'''
action.set_state(current)
shell = self.object
page = shell.props.selected_page
self.active_filter[page] = self.target_value_to_filter_name(current)
self.change_filter()
def change_filter(self):
'''
Changes the filter model on the selected page.
'''
if len(self.visited_pages) == 0:
self.set_callbacks() # set callbacks on first run
shell = self.object
page = shell.props.selected_page
self.log(
self.change_filter.__name__,
'Changing filter on ' + page.props.name
)
t = self.get_favourites_threshold()
active_filter = self.active_filter[page]
if page in self.visited_pages:
[_, query_models, t0] = self.visited_pages[page]
if (active_filter not in query_models or
(active_filter == 'Favourites' and t0 != t)):
query_models[active_filter] = self.filter_query_model(
active_filter, query_models['All Ratings']
)
self.visited_pages[page] = [active_filter, query_models, t]
self.refresh(page)
else:
query_models = {}
query_model = page.get_entry_view().props.model
query_models['All Ratings'] = self.filter_query_model(
'All Ratings', query_model
)
query_models[active_filter] = self.filter_query_model(
active_filter, query_model
)
self.visited_pages[page] = [active_filter, query_models, t]
self.refresh(page)
def set_callbacks(self):
'''
Sets callbacks to detect UI interactions, should be called only
after the rating filters are first activated.
'''
shell = self.object
shell.props.display_page_tree.connect(
"selected", self.on_page_change
)
shell.props.selected_page.connect(
"filter-changed", self.on_browser_change
)
shell.props.db.connect(
'entry-changed', self.on_entry_change
)
def get_favourites_threshold(self):
'''
Returns the current favourites threshold.
'''
return self.settings['favourites-threshold']
def on_favourites_threshold_changed(self, settings, key):
'''
Refreshes the view when the favourites threshold preference is
changed.
'''
shell = self.object
page = shell.props.selected_page
self.log(
self.on_favourites_threshold_changed.__name__,
'Favourites threshold changed on ' + page.props.name
)
if page in self.active_filter:
if self.active_filter[page] == 'Favourites':
self.change_filter()
def on_entry_change(self, db, entry, changes):
'''
Called when an entry in the current view is changed. If the user has
changed a track's rating, and the new rating should be filtered out,
then the page is refreshed.
'''
# This isn't working like it used to: the changes object no longer
# has a values property, so we can't check to see what was changed.
# For now, we'll just refresh everything each time something is
# changed, although this isn't ideal.
#change = changes.values
#if change.prop is RB.RhythmDBPropType.RATING:
if True:
for page in self.visited_pages:
[active_filter, query_models, t] = self.visited_pages[page]
query_model = query_models['All Ratings']
entries = [row[0] for row in query_model]
if entry in entries:
if "Favourites" in query_models:
del query_models["Favourites"]
if "Unrated" in query_models:
del query_models["Unrated"]
self.visited_pages[page] = [
active_filter,query_models, t
]
shell = self.object
self.on_page_change(None, shell.props.selected_page)
def on_browser_change(self, action):
'''
Called when the library browser for a visited page changes. Reapplies
the active filter to the new query model.
'''
shell = self.object
page = shell.props.selected_page
self.log(
self.on_browser_change.__name__,
"Browser changed on page " + page.props.name
)
query_models = {}
query_model = page.get_entry_view().props.model
active_filter = 'All Ratings'
query_models[active_filter] = self.filter_query_model(
active_filter, query_model
)
[active_filter, _, t] = self.visited_pages[page]
query_models[active_filter] = self.filter_query_model(
active_filter, query_model
)
self.visited_pages[page] = [active_filter, query_models, t]
self.refresh(page)
def on_page_change(self, display_page_tree, page):
    '''
    Called when the display page changes. Grabs query models and sets the
    active filter.

    Only playlist sources, auto playlist sources and the main library
    source are filterable; other page types are ignored.
    '''
    self.log(
        self.on_page_change.__name__,
        "Page changed to " + page.props.name
    )
    shell = self.object
    t = self.get_favourites_threshold()
    # NOTE(review): exact type checks (not isinstance), so subclasses
    # of PlaylistSource other than AutoPlaylistSource are excluded --
    # presumably intentional.
    if (type(page) == RB.PlaylistSource or
            type(page) == RB.AutoPlaylistSource or
            page == shell.props.library_source):
        if page in self.visited_pages:
            # Page seen before: reuse its cached filter state.
            [active_filter, query_models, t0] = self.visited_pages[page]
            # Rebuild the cached model if the Favourites threshold
            # changed since it was built, or if the model was
            # invalidated (e.g. by on_entry_change).
            if ((active_filter == "Favourites" and t0 != t) or
                    active_filter not in query_models):
                query_models[active_filter] = self.filter_query_model(
                    active_filter, query_models['All Ratings']
                )
                self.visited_pages[page] = [
                    active_filter, query_models, t
                ]
            self.action.set_state(self.target_values[active_filter])
            self.refresh(page)
        else:
            # First visit: start from the page's unfiltered model and
            # default to showing everything.
            query_models = {}
            query_model = page.get_entry_view().props.model
            active_filter = 'All Ratings'
            query_models[active_filter] = self.filter_query_model(
                active_filter, query_model
            )
            self.visited_pages[page] = [active_filter, query_models, t]
            self.action.set_state(self.target_values[active_filter])
            # Track browser changes so the filter can be reapplied.
            page.connect("filter-changed", self.on_browser_change)
def filter_query_model(self, active_filter, query_model):
    '''
    Builds and returns a query model containing only the entries of
    ``query_model`` that pass ``active_filter``.
    '''
    self.log(
        self.filter_query_model.__name__,
        "Creating new query model for " + active_filter
    )
    shell = self.object
    db = shell.props.db
    new_query_model = RB.RhythmDBQueryModel.new_empty(db)
    if active_filter == 'All Ratings':
        # Nothing to filter out -- reuse the source model as-is.
        new_query_model = query_model
    else:
        # 'Favourites' keeps ratings at or above the threshold;
        # anything else ('Unrated') keeps only zero-rated entries.
        if active_filter == "Favourites":
            threshold = self.get_favourites_threshold()
            accepted = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0][threshold:]
        else:
            accepted = [0.0]
        for row in query_model:
            candidate = row[0]
            rating = candidate.get_double(RB.RhythmDBPropType.RATING)
            if rating in accepted:
                new_query_model.add_entry(candidate, -1)
    return new_query_model
def refresh(self, page):
    '''
    Re-applies the cached filtered model to the entry view of ``page``,
    preserving the view's current sort order.
    '''
    active_filter, query_models, _ = self.visited_pages[page]
    self.log(
        self.refresh.__name__,
        "Applying '" + active_filter + "' to " + page.props.name
    )
    model = query_models[active_filter]
    view = page.get_entry_view()
    # Swapping the model resets sorting, so capture and restore it.
    sort_order = view.get_sorting_type()
    view.set_model(model)
    view.set_sorting_type(sort_order)
    page.props.query_model = model
class Preferences(GObject.Object, PeasGtk.Configurable):
    '''
    Preferences for the RatingFilters plugin. It holds the settings for the
    plugin and is also responsible for creating the preferences dialog.
    '''
    __gtype_name__ = 'RatingFiltersPreferences'
    object = GObject.property(type=GObject.Object)
    # Combobox rows map to ratings in descending order (row 0 -> 5 stars).
    ratings = [5, 4, 3, 2, 1]

    def __init__(self):
        GObject.Object.__init__(self)

    def do_create_configure_widget(self):
        '''
        Creates the plugin's preferences dialog, binds it to the plugin's
        GSettings schema, and returns the root widget.
        '''
        settings = Gio.Settings('org.gnome.rhythmbox.plugins.rating_filters')

        def favourites_threshold_changed(button):
            # Translate the selected combobox row back into a rating.
            settings['favourites-threshold'] = self.ratings[
                button.get_active()
            ]
            print('Changing favourites threshold to ' + str(settings['favourites-threshold']))

        # NOTE(review): the handler key below says
        # "favourites_rating_threshold_combobox_changed_cb" while the
        # widget is named "favourites_threshold_combobox" -- confirm it
        # matches the signal handler declared in the .ui file.
        self.configure_callback_dic = {
            "favourites_rating_threshold_combobox_changed_cb": favourites_threshold_changed
        }
        # Create dialog
        builder = Gtk.Builder()
        builder.add_from_file(
            rb.find_plugin_file(self, 'RatingFiltersPreferences.ui')
        )
        # Bind dialog to settings: preselect the row for the saved threshold.
        builder.get_object("favourites_threshold_combobox").set_active(
            self.ratings.index(settings['favourites-threshold'])
        )
        builder.connect_signals(self.configure_callback_dic)
        # Return dialog
        return builder.get_object('main')
| gpl-3.0 |
ofermend/medicare-demo | socialite/jython/Lib/pickle.py | 6 | 44792 | """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision$" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
    """Base class for all pickling-related exceptions."""
class PicklingError(PickleError):
    """Raised when an unpicklable object is passed to the dump() method."""
class UnpicklingError(PickleError):
    """Raised when there is a problem unpickling an object, such as a
    security violation.

    Note that other exceptions may also be raised during unpickling,
    including (but not necessarily limited to) AttributeError, EOFError,
    ImportError, and IndexError.
    """
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
    """This takes a file-like object for writing a pickle data stream.

    The optional protocol argument tells the pickler to use the
    given protocol; supported protocols are 0, 1, 2.  The default
    protocol is 0, to be backwards compatible.  (Protocol 0 is the
    only protocol that can be written to a file opened in text
    mode and read back successfully.  When using a protocol higher
    than 0, make sure the file is opened in binary mode, both when
    pickling and unpickling.)

    Protocol 1 is more efficient than protocol 0; protocol 2 is
    more efficient than protocol 1.

    Specifying a negative protocol version selects the highest
    protocol version supported.  The higher the protocol used, the
    more recent the version of Python needed to read the pickle
    produced.

    The file parameter must have a write() method that accepts a single
    string argument.  It can thus be an open file object, a StringIO
    object, or any other custom object that meets this interface.
    """
    if protocol is None:
        protocol = 0
    if protocol < 0:
        # Negative means "use the newest protocol available".
        protocol = HIGHEST_PROTOCOL
    elif not 0 <= protocol <= HIGHEST_PROTOCOL:
        raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
    # Bind the stream's write method once to avoid a lookup per opcode.
    self.write = file.write
    # memo maps id(obj) -> (memo key, obj); see memoize() for details.
    self.memo = {}
    self.proto = int(protocol)
    # Protocols >= 1 use the compact binary opcodes.
    self.bin = protocol >= 1
    # fast mode (set externally) disables memoization entirely.
    self.fast = 0
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
    """Store an object in the memo."""
    # The Pickler memo is a dictionary mapping object ids to 2-tuples
    # that contain the Unpickler memo key and the object being memoized.
    # The memo key is written to the pickle and will become
    # the key in the Unpickler's memo.  The object is stored in the
    # Pickler memo so that transient objects are kept alive during
    # pickling.

    # The use of the Unpickler memo length as the memo key is just a
    # convention.  The only requirement is that the memo values be unique.
    # But there appears no advantage to any other scheme, and this
    # scheme allows the Unpickler memo to be implemented as a plain (but
    # growable) array, indexed by memo key.
    if self.fast:
        # Fast mode skips memoization; the caller guarantees no cycles.
        return
    assert id(obj) not in self.memo
    memo_len = len(self.memo)
    # Emit a PUT-family opcode so the Unpickler stores the object too.
    self.write(self.put(memo_len))
    self.memo[id(obj)] = memo_len, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
    # `pack` is bound as a default argument purely as a speed hack
    # (avoids a global lookup per call).
    if self.bin:
        if i < 256:
            # One-byte index fits the short binary form.
            return BINPUT + chr(i)
        else:
            # Four-byte little-endian index.
            return LONG_BINPUT + pack("<i", i)
    # Text protocol: decimal index terminated by a newline.
    return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
    # `pack` is bound as a default argument purely as a speed hack.
    if self.bin:
        if i < 256:
            # One-byte index fits the short binary form.
            return BINGET + chr(i)
        else:
            # Four-byte little-endian index.
            return LONG_BINGET + pack("<i", i)
    # Text protocol: decimal index terminated by a newline.
    return GET + repr(i) + '\n'
def save(self, obj):
    """Pickle obj, choosing the most specific strategy available.

    Tries, in order: a persistent id, a memo reference, the built-in
    type dispatch table, save_global for classes, and finally the
    copy_reg / __reduce_ex__ / __reduce__ protocol.
    """
    # Check for persistent id (defined by a subclass)
    pid = self.persistent_id(obj)
    if pid:
        self.save_pers(pid)
        return
    # Check the memo: already-seen objects are written as a GET
    # reference instead of being re-pickled.
    x = self.memo.get(id(obj))
    if x:
        self.write(self.get(x[0]))
        return
    # Check the type dispatch table
    t = type(obj)
    f = self.dispatch.get(t)
    if f:
        f(self, obj) # Call unbound method with explicit self
        return
    # Check for a class with a custom metaclass; treat as regular class
    try:
        issc = issubclass(t, TypeType)
    except TypeError: # t is not a class (old Boost; see SF #502085)
        issc = 0
    if issc:
        self.save_global(obj)
        return
    # Check copy_reg.dispatch_table
    reduce = dispatch_table.get(t)
    if reduce:
        rv = reduce(obj)
    else:
        # Check for a __reduce_ex__ method, fall back to __reduce__
        reduce = getattr(obj, "__reduce_ex__", None)
        if reduce:
            rv = reduce(self.proto)
        else:
            reduce = getattr(obj, "__reduce__", None)
            if reduce:
                rv = reduce()
            else:
                raise PicklingError("Can't pickle %r object: %r" %
                                    (t.__name__, obj))
    # Check for string returned by reduce(), meaning "save as global"
    if type(rv) is StringType:
        self.save_global(obj, rv)
        return
    # Assert that reduce() returned a tuple
    if type(rv) is not TupleType:
        raise PicklingError("%s must return string or tuple" % reduce)
    # Assert that it returned an appropriately sized tuple
    l = len(rv)
    if not (2 <= l <= 5):
        raise PicklingError("Tuple returned by %s must have "
                            "two to five elements" % reduce)
    # Save the reduce() output and finally memoize the object
    self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not callable(func):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType == UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
def save_dict(self, obj):
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_global
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
    """Keep a reference to *x* inside *memo*.

    Objects are remembered by their id, so possibly-temporary objects
    must be anchored somewhere to keep their ids from being recycled.
    The anchor list lives under the memo's own id -- a key that is
    never otherwise used (unless someone deepcopies the memo itself).
    """
    memo.setdefault(id(memo), []).append(x)
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility

def whichmodule(func, funcname):
    """Figure out which module *func* was defined in and return its name.

    Prefers the function's own __module__ attribute, then a cached
    answer in classmap, then a scan of sys.modules; falls back to
    "__main__" when the function cannot be located.
    """
    # Python functions should always get an __module__ from their globals.
    mod = getattr(func, "__module__", None)
    if mod is not None:
        return mod
    if func in classmap:
        return classmap[func]
    # Scan the loaded modules for one whose `funcname` attribute is
    # this exact object (identity, not equality).
    found = '__main__'
    for name, module in sys.modules.items():
        if module is None:
            continue # skip dummy package entries
        if name != '__main__' and getattr(module, funcname, None) is func:
            found = name
            break
    classmap[func] = found
    return found
# Unpickling machinery
class Unpickler:
def __init__(self, file):
    """This takes a file-like object for reading a pickle data stream.

    The protocol version of the pickle is detected automatically, so no
    proto argument is needed.

    The file-like object must have two methods, a read() method that
    takes an integer argument, and a readline() method that requires no
    arguments.  Both methods should return a string.  Thus file-like
    object can be a file object opened for reading, a StringIO object,
    or any other custom object that meets this interface.
    """
    # Bind the stream methods once; the dispatch loop calls them often.
    self.readline = file.readline
    self.read = file.read
    # memo maps memo keys (written by PUT-family opcodes) to objects.
    self.memo = {}
def load(self):
    """Read a pickled object representation from the open file.

    Return the reconstituted object hierarchy specified in the file.
    """
    # A fresh, unique sentinel object marking MARK positions on the stack.
    self.mark = object() # any new unique object
    self.stack = []
    self.append = self.stack.append
    read = self.read
    dispatch = self.dispatch
    try:
        # Dispatch one opcode at a time until STOP raises _Stop.
        while 1:
            key = read(1)
            dispatch[key](self)
    except _Stop, stopinst:
        # The STOP handler carries the fully unpickled object.
        return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears.  For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
    stack = self.stack
    mark = self.mark
    # Scan downward; identity test because mark is a unique sentinel.
    k = len(stack)-1
    while stack[k] is not mark: k = k-1
    return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object.  It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
#
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
    args = tuple(self.stack[k+1:])
    del self.stack[k:]
    instantiated = 0
    if (not args and
            type(klass) is ClassType and
            not hasattr(klass, "__getinitargs__")):
        # Old-style class with no init args: fabricate an empty
        # instance and graft the class on, skipping __init__ entirely.
        try:
            value = _EmptyClass()
            value.__class__ = klass
            instantiated = 1
        except RuntimeError:
            # In restricted execution, assignment to inst.__class__ is
            # prohibited
            pass
    if not instantiated:
        try:
            value = klass(*args)
        except TypeError, err:
            # Re-raise with the class name for easier debugging,
            # preserving the original traceback.
            raise TypeError, "in constructor for %s: %s" % (
                klass.__name__, str(err)), sys.exc_info()[2]
    self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
# EXT1/EXT2/EXT4 opcodes (protocol 2): an extension-registry code encoded in
# 1, 2 or 4 little-endian bytes.  mloads('i' + ...) decodes a 4-byte int via
# the marshal module; the shorter forms are zero-padded up to 4 bytes.
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
# Resolve an extension code to an object, caching the result in
# _extension_cache.  A fresh list is used as a sentinel so that any cached
# value (including None) is distinguishable from a cache miss.
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
    """Resolve a pickled global: import *module* and return its
    attribute *name*.

    Subclasses may override this, e.g. to restrict which globals an
    untrusted pickle stream is allowed to load.
    """
    # __import__ returns the top-level package for dotted names, so the
    # actual submodule is fetched from sys.modules instead.
    __import__(module)
    return getattr(sys.modules[module], name)
# REDUCE opcode: stack holds ... callable argtuple; replace both with the
# result of calling the callable with those arguments.
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
# POP opcode: discard the top-of-stack object.
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
# POP_MARK opcode: discard the topmost mark and everything above it.
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
# DUP opcode: duplicate the top-of-stack object.
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
# GET opcode (text protocol): memo key is the decimal string on the
# following line.
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
# BINGET / LONG_BINGET (binary protocols): the memo index arrives as a
# 1-byte or 4-byte integer; memo keys are stored as repr() of that int.
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
# PUT opcode (text protocol): store top-of-stack in the memo under the
# decimal-string key read from the stream.
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
# BINPUT / LONG_BINPUT: binary counterparts of PUT, mirroring the
# repr(int) memo-key convention used by load_binget above.
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
# APPEND opcode: pop one value and append it to the list below it.
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
# APPENDS opcode: bulk form of APPEND -- extend the list sitting just below
# the topmost mark with everything above the mark, then drop the mark and
# the extended items.
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
# SETITEM opcode: stack holds ... dict key value; pop key and value and
# store them into the dict.
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
# SETITEMS opcode: bulk form of SETITEM -- alternating key/value pairs sit
# above the topmost mark; the target dict sits just below the mark.
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
# BUILD opcode: pop a state object and apply it to the instance below it.
# Prefers the instance's own __setstate__; otherwise updates __dict__ (and,
# for a 2-tuple state, the second element is slot state applied via setattr).
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
# A 2-tuple state means (instance __dict__ state, slots state).
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
inst.__dict__.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
# MARK opcode: push the mark sentinel object onto the stack.
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
# STOP opcode: the finished object is on top of the stack; deliver it to
# Unpickler.load() by raising _Stop with the value attached.
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
# Instances are created uninitialized; _instantiate assigns __class__
# afterwards so the real class's __init__ is never run.
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0L)
''
>>> encode_long(255L)
'\xff\x00'
>>> encode_long(32767L)
'\xff\x7f'
>>> encode_long(-256L)
'\x00\xff'
>>> encode_long(-32768L)
'\x00\x80'
>>> encode_long(-128L)
'\x80'
>>> encode_long(127L)
'\x7f'
>>>
"""
if x == 0:
return ''
if x > 0:
# Positive: hex digits give big-endian nibbles; strip the "0x" prefix
# and any trailing "L" (njunkchars counts both).
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
# unhexlify produces big-endian bytes; reverse for little-endian output.
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
# Empty string encodes 0 (mirrors the special case in encode_long).
if nbytes == 0:
return 0L
# Reverse to big-endian, hexlify, and parse as an unsigned long.
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
# If the most significant byte has its high bit set, the value is
# negative: subtract 2**(8*nbytes) to undo the two's complement.
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
    """Pickle *obj* to the open file object *file* using *protocol*."""
    Pickler(file, protocol).dump(obj)

def dumps(obj, protocol=None):
    """Return the pickled representation of *obj* as a string."""
    buf = StringIO()
    Pickler(buf, protocol).dump(obj)
    return buf.getvalue()

def load(file):
    """Read and return one pickled object from the open file *file*."""
    return Unpickler(file).load()

def loads(str):
    """Read and return one pickled object from the string *str*."""
    return Unpickler(StringIO(str)).load()
# Doctest
# Run the doctests embedded in this module (e.g. encode_long/decode_long).
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| apache-2.0 |
benoitsteiner/tensorflow | tensorflow/python/kernel_tests/distributions/categorical_test.py | 17 | 13485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
# Build a Categorical distribution with random logits of shape
# batch_shape + [num_classes]; the -50 shift keeps logits very negative
# without changing the normalized probabilities.  dtype sets sample dtype.
def make_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return categorical.Categorical(logits, dtype=dtype)
class CategoricalTest(test.TestCase):
# Constructing from probs: probs round-trip and logits have matching shape.
def testP(self):
p = [0.2, 0.8]
dist = categorical.Categorical(probs=p)
with self.test_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
# Constructing from logits: probs and logits must both round-trip.
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = categorical.Categorical(logits=logits)
with self.test_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
# batch_shape/event_shape for scalar and multi-dimensional batches, with
# num_classes given both as a Python int and as a graph constant.
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
# event_size is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10, tensor_util.constant_value(dist.event_size))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_categorical(
batch_shape, constant_op.constant(
10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertEqual(10, dist.event_size.eval())
# Samples and mode follow the requested integer dtype; probs/logits and
# densities stay float32 regardless.
def testDtype(self):
dist = make_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(
dist.logits.dtype, dist.prob(np.array(
0, dtype=np.int64)).dtype)
self.assertEqual(
dist.logits.dtype, dist.log_prob(np.array(
0, dtype=np.int64)).dtype)
# Sampling works when the logits shape is only known at feed time; extreme
# logits (+/-1000) make the sampled class deterministic.
def testUnknownShape(self):
with self.test_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = categorical.Categorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertEqual(1, sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([1, 0], sample_value_batch)
# prob() evaluated per batch entry.
def testPMFWithBatch(self):
histograms = [[0.2, 0.8], [0.6, 0.4]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob([0, 1]).eval(), [0.2, 0.4])
# prob() for a scalar (no batch) distribution.
def testPMFNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
with self.test_session():
self.assertAllClose(dist.prob(0).eval(), 0.2)
def testCDFWithDynamicEventShape(self):
"""Test that dynamically-sized events with unknown shape work."""
batch_size = 2
histograms = array_ops.placeholder(dtype=dtypes.float32,
shape=(batch_size, None))
event = array_ops.placeholder(dtype=dtypes.float32, shape=(batch_size,))
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
# Feed values into the placeholder with different shapes
# three classes.
event_feed_one = [0, 1]
histograms_feed_one = [[0.5, 0.3, 0.2], [1.0, 0.0, 0.0]]
expected_cdf_one = [0.0, 1.0]
feed_dict_one = {
histograms: histograms_feed_one,
event: event_feed_one
}
# six classes.
event_feed_two = [2, 5]
histograms_feed_two = [[0.9, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.15, 0.2, 0.05, 0.35, 0.13, 0.12]]
expected_cdf_two = [0.9, 0.88]
feed_dict_two = {
histograms: histograms_feed_two,
event: event_feed_two
}
# The same cdf_op must work for both class counts.
with self.test_session() as sess:
actual_cdf_one = sess.run(cdf_op, feed_dict=feed_dict_one)
actual_cdf_two = sess.run(cdf_op, feed_dict=feed_dict_two)
self.assertAllClose(actual_cdf_one, expected_cdf_one)
self.assertAllClose(actual_cdf_two, expected_cdf_two)
# cdf() per batch entry; CDF here is P(X < event), i.e. exclusive of event.
def testCDFWithBatch(self):
histograms = [[0.1, 0.2, 0.3, 0.25, 0.15],
[0.0, 0.75, 0.2, 0.05, 0.0]]
event = [0, 3]
expected_cdf = [0.0, 0.95]
dist = categorical.Categorical(probs=histograms)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAllClose(cdf_op.eval(), expected_cdf)
# cdf() for a scalar distribution.
def testCDFNoBatch(self):
histogram = [0.1, 0.2, 0.3, 0.4]
event = 2
expected_cdf = 0.3
dist = categorical.Categorical(probs=histogram)
cdf_op = dist.cdf(event)
with self.test_session():
self.assertAlmostEqual(cdf_op.eval(), expected_cdf)
# log_prob() per batch entry matches log of the underlying histogram.
def testLogPMF(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.log_prob([0, 1]).eval(), np.log([0.2, 0.4]))
# entropy() against the closed form -sum(p * log p), scalar batch.
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
# entropy() per batch entry.
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = categorical.Categorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
# entropy() must both match the softmax closed form and be differentiable
# w.r.t. the logits, with gradients equal to the hand-built version.
def testEntropyGradient(self):
with self.test_session() as sess:
logits = constant_op.constant([[1., 2., 3.], [2., 5., 1.]])
probabilities = nn_ops.softmax(logits)
log_probabilities = nn_ops.log_softmax(logits)
true_entropy = - math_ops.reduce_sum(
probabilities * log_probabilities, axis=-1)
categorical_distribution = categorical.Categorical(probs=probabilities)
categorical_entropy = categorical_distribution.entropy()
# works
true_entropy_g = gradients_impl.gradients(true_entropy, [logits])
categorical_entropy_g = gradients_impl.gradients(
categorical_entropy, [logits])
res = sess.run({"true_entropy": true_entropy,
"categorical_entropy": categorical_entropy,
"true_entropy_g": true_entropy_g,
"categorical_entropy_g": categorical_entropy_g})
self.assertAllClose(res["true_entropy"],
res["categorical_entropy"])
self.assertAllClose(res["true_entropy_g"],
res["categorical_entropy_g"])
# Empirical class frequencies over 10000 samples should approach the
# histogram probabilities (atol 1e-2).
def testSample(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
n = 10000
samples = dist.sample(n, seed=123)
samples.set_shape([n, 1, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
self.assertAllClose(
[[0.2, 0.4]], np.mean(
sample_values == 0, axis=0), atol=1e-2)
self.assertAllClose(
[[0.8, 0.6]], np.mean(
sample_values == 1, axis=0), atol=1e-2)
# E[p(X)] over samples equals sum(p_i^2) for each batch entry.
def testSampleWithSampleShape(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
[0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)
# prob() broadcasts the event against the [1, 2] batch shape for scalar,
# vector, and higher-rank events.
def testLogPMFBroadcasting(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
prob = dist.prob(1)
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([1])
self.assertAllClose([[0.8, 0.6]], prob.eval())
prob = dist.prob([0, 1])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[0, 1]])
self.assertAllClose([[0.2, 0.6]], prob.eval())
prob = dist.prob([[[0, 1]]])
self.assertAllClose([[[0.2, 0.6]]], prob.eval())
prob = dist.prob([[1, 0], [0, 1]])
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], prob.eval())
prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],
prob.eval())
# Static shape of log_prob follows event broadcasting rules, batched case.
def testLogPMFShape(self):
with self.test_session():
# shape [1, 2, 2]
histograms = [[[0.2, 0.8], [0.4, 0.6]]]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob([0, 1])
self.assertEqual(2, log_prob.get_shape().ndims)
self.assertAllEqual([1, 2], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
# Same, for a scalar (no batch) distribution.
def testLogPMFShapeNoBatch(self):
histograms = [0.2, 0.8]
dist = categorical.Categorical(math_ops.log(histograms))
log_prob = dist.log_prob(0)
self.assertEqual(0, log_prob.get_shape().ndims)
self.assertAllEqual([], log_prob.get_shape())
log_prob = dist.log_prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertEqual(3, log_prob.get_shape().ndims)
self.assertAllEqual([2, 2, 2], log_prob.get_shape())
# mode() returns the argmax class per batch entry.
def testMode(self):
with self.test_session():
histograms = [[[0.2, 0.8], [0.6, 0.4]]]
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
self.assertAllEqual(dist.mode().eval(), [[1, 0]])
# Registered KL(Categorical || Categorical) against the numpy closed form;
# KL of a distribution with itself must be zero.
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.test_session() as sess:
for categories in [2, 4]:
for batch_size in [1, 10]:
a_logits = np.random.randn(batch_size, categories)
b_logits = np.random.randn(batch_size, categories)
a = categorical.Categorical(logits=a_logits)
b = categorical.Categorical(logits=b_logits)
kl = kullback_leibler.kl_divergence(a, b)
kl_val = sess.run(kl)
# Make sure KL(a||a) is 0
kl_same = sess.run(kullback_leibler.kl_divergence(a, a))
prob_a = np_softmax(a_logits)
prob_b = np_softmax(b_logits)
kl_expected = np.sum(prob_a * (np.log(prob_a) - np.log(prob_b)),
axis=-1)
self.assertEqual(kl.get_shape(), (batch_size,))
self.assertAllClose(kl_val, kl_expected)
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
test.main()
| apache-2.0 |
aviweit/libcloud | libcloud/test/compute/test_abiquo.py | 40 | 21013 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abiquo Test Suite
"""
import unittest
import sys
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.abiquo import AbiquoNodeDriver
from libcloud.common.abiquo import ForbiddenError, get_href
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.base import NodeLocation, NodeImage
from libcloud.test.compute import TestCaseMixin
from libcloud.test import MockHttpTestCase
from libcloud.test.file_fixtures import ComputeFileFixtures
class AbiquoNodeDriverTest(unittest.TestCase, TestCaseMixin):
"""
Abiquo Node Driver test suite
"""
# NOTE: the mock HTTP layer below keys its behavior off the Basic-auth
# credentials, so each user:password pair selects a different scenario.
def setUp(self):
"""
Set up the driver with the main user
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
self.driver = AbiquoNodeDriver('son', 'goku',
'http://dummy.host.com/api')
def test_unauthorized_controlled(self):
"""
Test the Unauthorized Exception is Controlled.
Test, through the 'login' method, that a '401 Unauthorized'
raises a 'InvalidCredsError' instead of the 'MalformedUrlException'
"""
self.assertRaises(InvalidCredsError, AbiquoNodeDriver, 'son',
'goten', 'http://dummy.host.com/api')
def test_forbidden_controlled(self):
"""
Test the Forbidden Exception is Controlled.
Test, through the 'list_images' method, that a '403 Forbidden'
raises an 'ForbidenError' instead of the 'MalformedUrlException'
"""
AbiquoNodeDriver.connectionCls.conn_classes = (AbiquoMockHttp, None)
conn = AbiquoNodeDriver('son', 'gohan', 'http://dummy.host.com/api')
self.assertRaises(ForbiddenError, conn.list_images)
def test_handle_other_errors_such_as_not_found(self):
"""
Test common 'logical' exceptions are controlled.
Test that common exception (normally 404-Not Found and 409-Conflict),
that return an XMLResponse with the explanation of the errors are
controlled.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
self.assertRaises(LibcloudError, self.driver.list_images)
def test_ex_create_and_delete_empty_group(self):
"""
Test the creation and deletion of an empty group.
"""
group = self.driver.ex_create_group('libcloud_test_group')
group.destroy()
def test_create_node_no_image_raise_exception(self):
"""
Test 'create_node' without image.
Test the 'create_node' function without 'image' parameter raises
an Exception
"""
self.assertRaises(LibcloudError, self.driver.create_node)
# Overrides TestCaseMixin: only checks the return type of list_locations.
def test_list_locations_response(self):
if not self.should_list_locations:
return None
locations = self.driver.list_locations()
self.assertTrue(isinstance(locations, list))
def test_create_node_specify_location(self):
"""
Test you can create a node specifying the location.
"""
image = self.driver.list_images()[0]
location = self.driver.list_locations()[0]
self.driver.create_node(image=image, location=location)
def test_create_node_specify_wrong_location(self):
"""
Test you can not create a node with wrong location.
"""
image = self.driver.list_images()[0]
# Location id 435 does not exist in the mock fixtures.
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_wrong_image(self):
"""
Test image compatibility.
Some locations only can handle a group of images, not all of them.
Test you can not create a node with incompatible image-location.
"""
# Create fake NodeImage
image = NodeImage(3234, 'dummy-image', self.driver)
location = self.driver.list_locations()[0]
# With this image, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.create_node, image=image,
location=location)
def test_create_node_specify_group_name(self):
"""
Test 'create_node' into a concrete group.
"""
image = self.driver.list_images()[0]
self.driver.create_node(image=image, group_name='new_group_name')
def test_create_group_location_does_not_exist(self):
"""
Test 'create_node' with an unexistent location.
Defines a 'fake' location and tries to create a node into it.
"""
location = NodeLocation(435, 'fake-location', 'Spain', self.driver)
# With this location, it should raise an Exception
self.assertRaises(LibcloudError, self.driver.ex_create_group,
name='new_group_name',
location=location)
def test_destroy_node_response(self):
"""
'destroy_node' basic test.
Override the destroy to return a different node available
to be undeployed. (by default it returns an already undeployed node,
for test creation).
"""
# 'go:trunks' credentials make the mock return a deployed node.
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertTrue(ret)
def test_destroy_node_response_failed(self):
"""
'destroy_node' asynchronous error.
Test that the driver handles correctly when, for some reason,
the 'destroy' job fails.
"""
# 'muten:roshi' credentials make the mock report a failed async task.
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
ret = self.driver.destroy_node(node)
self.assertFalse(ret)
def test_destroy_node_allocation_state(self):
"""
Test the 'destroy_node' invalid state.
Try to destroy a node when the node is not running.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
# Override the destroy to return a different node available to be
# undeployed
node = self.driver.list_nodes()[0]
# The mock class with the user:password 've:geta' returns a node that
# is in 'ALLOCATION' state and hence, the 'destroy_node' method should
# raise a LibcloudError
self.assertRaises(LibcloudError, self.driver.destroy_node, node)
def test_destroy_not_deployed_group(self):
"""
Test 'ex_destroy_group' when group is not deployed.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertTrue(group.destroy())
def test_destroy_deployed_group(self):
"""
Test 'ex_destroy_group' when there are machines running.
"""
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertTrue(group.destroy())
def test_destroy_deployed_group_failed(self):
"""
Test 'ex_destroy_group' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
self.driver = AbiquoNodeDriver('muten', 'roshi',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[0]
self.assertFalse(group.destroy())
def test_destroy_group_invalid_state(self):
"""
Test 'ex_destroy_group' invalid state.
Test the Driver raises an exception when the group is in
invalid temporal state.
"""
self.driver = AbiquoNodeDriver('ve', 'geta',
'http://dummy.host.com/api')
location = self.driver.list_locations()[0]
group = self.driver.ex_list_groups(location)[1]
self.assertRaises(LibcloudError, group.destroy)
def test_run_node(self):
"""
Test 'ex_run_node' feature.
"""
node = self.driver.list_nodes()[0]
# Node is by default in NodeState.TERMINATED and AbiquoState ==
# 'NOT_ALLOCATED'
# so it is available to be runned
self.driver.ex_run_node(node)
def test_run_node_invalid_state(self):
"""
Test 'ex_run_node' invalid state.
Test the Driver raises an exception when try to run a
node that is in invalid state to run.
"""
self.driver = AbiquoNodeDriver('go', 'trunks',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is by default in AbiquoState = 'ON' for user 'go:trunks'
# so is not available to be runned
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
def test_run_node_failed(self):
"""
Test 'ex_run_node' fails.
Test driver handles correctly when, for some reason, the
asynchronous job fails.
"""
# 'ten:shin' credentials make the mock report a failed deploy task.
self.driver = AbiquoNodeDriver('ten', 'shin',
'http://dummy.host.com/api')
node = self.driver.list_nodes()[0]
# Node is in the correct state, but it fails because of the
# async task and it raises the error.
self.assertRaises(LibcloudError, self.driver.ex_run_node, node)
# get_href must strip scheme/host/port and any path prefix before '/api',
# returning only the API-relative path for the requested 'rel' link.
def test_get_href(self):
xml = '''
<datacenter>
<link href="http://10.60.12.7:80/api/admin/datacenters/2"
type="application/vnd.abiquo.datacenter+xml" rel="edit1"/>
<link href="http://10.60.12.7:80/ponies/bar/foo/api/admin/datacenters/3"
type="application/vnd.abiquo.datacenter+xml" rel="edit2"/>
<link href="http://vdcbridge.interoute.com:80/jclouds/apiouds/api/admin/enterprises/1234"
type="application/vnd.abiquo.datacenter+xml" rel="edit3"/>
</datacenter>
'''
elem = ET.XML(xml)
href = get_href(element=elem, rel='edit1')
self.assertEqual(href, '/admin/datacenters/2')
href = get_href(element=elem, rel='edit2')
self.assertEqual(href, '/admin/datacenters/3')
href = get_href(element=elem, rel='edit3')
self.assertEqual(href, '/admin/enterprises/1234')
class AbiquoMockHttp(MockHttpTestCase):
"""
Mock the functionallity of the remote Abiquo API.
"""
# Handlers are selected by URL path (underscored method names) and branch
# on the Basic-auth header to simulate the per-user scenarios used above.
fixtures = ComputeFileFixtures('abiquo')
fixture_tag = 'default'
def _api_login(self, method, url, body, headers):
# 'son:goten' (base64 below) is the only user rejected with a 401.
if headers['Authorization'] == 'Basic c29uOmdvdGVu':
expected_response = self.fixtures.load('unauthorized_user.html')
expected_status = httplib.UNAUTHORIZED
else:
expected_response = self.fixtures.load('login.xml')
expected_status = httplib.OK
return (expected_status, expected_response, {}, '')
def _api_cloud_virtualdatacenters(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdcs.xml'), {}, '')
def _api_cloud_virtualdatacenters_4(self, method, url, body, headers):
return (httplib.OK, self.fixtures.load('vdc_4.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances(self, method, url, body, headers):
if method == 'POST':
vapp_name = ET.XML(body).findtext('name')
if vapp_name == 'libcloud_test_group':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
elif vapp_name == 'new_group_name':
# we come from 'test_ex_create_and_delete_empty_group(self):'
# method and so, we return the 'ok' return
response = self.fixtures.load('vdc_4_vapp_creation_ok.xml')
return (httplib.OK, response, {}, '')
else:
# It will be a 'GET';
return (httplib.OK, self.fixtures.load('vdc_4_vapps.xml'), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5(self, method, url, body, headers):
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to destroy a group with 'needs_sync' state
response = self.fixtures.load('vdc_4_vapp_5_needs_sync.xml')
else:
# Try to destroy a group with 'undeployed' state
response = self.fixtures.load('vdc_4_vapp_5.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6(self, method, url, body, headers):
if method == 'GET':
# deployed vapp
response = self.fixtures.load('vdc_4_vapp_6.xml')
return (httplib.OK, response, {}, '')
else:
# it will be a 'DELETE'
return (httplib.NO_CONTENT, '', {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_1da8c8b6_86f6_49ef_9d29_57dcc73b875a(self, method, url, body, headers):
if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
# User 'muten:roshi' failed task
response = self.fixtures.load(
'vdc_4_vapp_6_undeploy_task_failed.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_undeploy_task.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_5_virtualmachines(
self, method, url, body, headers):
# This virtual app never have virtual machines
if method == 'GET':
response = self.fixtures.load('vdc_4_vapp_5_vms.xml')
return (httplib.OK, response, {}, '')
elif method == 'POST':
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines(
self, method, url, body, headers):
# Default-created virtual app virtual machines'
if method == 'GET':
if headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
response = self.fixtures.load('vdc_4_vapp_6_vms_allocated.xml')
else:
response = self.fixtures.load('vdc_4_vapp_6_vms.xml')
return (httplib.OK, response, {}, '')
else:
# it must be a POST
response = self.fixtures.load('vdc_4_vapp_6_vm_creation_ok.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3(self, method, url, body, headers):
if (headers['Authorization'] == 'Basic Z286dHJ1bmtz' or
headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk='):
# Undeploy node
response = self.fixtures.load("vdc_4_vapp_6_vm_3_deployed.xml")
elif headers['Authorization'] == 'Basic dmU6Z2V0YQ==':
# Try to undeploy a node with 'allocation' state
response = self.fixtures.load('vdc_4_vapp_6_vm_3_allocated.xml')
else:
# Get node
response = self.fixtures.load('vdc_4_vapp_6_vm_3.xml')
return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_deploy(self, method, url,
body, headers):
response = self.fixtures.load('vdc_4_vapp_6_vm_3_deploy.xml')
return (httplib.CREATED, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_b44fe278_6b0f_4dfb_be81_7c03006a93cb(
        self, method, url, body, headers):
    """Mock polling the deploy task; user 'ten:shin' sees a failed task."""
    if headers['Authorization'] == 'Basic dGVuOnNoaW4=':
        fixture = 'vdc_4_vapp_6_vm_3_deploy_task_failed.xml'
    else:
        fixture = 'vdc_4_vapp_6_vm_3_deploy_task.xml'
    return (httplib.OK, self.fixtures.load(fixture), {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_action_undeploy(
        self, method, url, body, headers):
    """Mock undeploying the whole virtual appliance."""
    payload = self.fixtures.load('vdc_4_vapp_6_undeploy.xml')
    return (httplib.OK, payload, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_reset(
        self, method, url, body, headers):
    """Mock resetting VM 3: respond with the async reset task."""
    payload = self.fixtures.load('vdc_4_vapp_6_vm_3_reset.xml')
    return (httplib.CREATED, payload, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_tasks_a8c9818e_f389_45b7_be2c_3db3a9689940(self, method, url, body, headers):
    """Mock polling the reset task for VM 3."""
    if headers['Authorization'] == 'Basic bXV0ZW46cm9zaGk=':
        # User 'muten:roshi' failed task
        # NOTE(review): this branch loads the *undeploy* failed-task
        # fixture, not a reset one — presumably deliberate fixture
        # sharing; confirm against the test that drives this path.
        response = self.fixtures.load(
            'vdc_4_vapp_6_undeploy_task_failed.xml')
    else:
        response = self.fixtures.load('vdc_4_vapp_6_vm_3_reset_task.xml')
    return (httplib.OK, response, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_action_undeploy(
        self, method, url, body, headers):
    """Mock undeploying VM 3: respond with the async undeploy task."""
    payload = self.fixtures.load('vdc_4_vapp_6_vm_3_undeploy.xml')
    return (httplib.CREATED, payload, {}, '')
def _api_cloud_virtualdatacenters_4_virtualappliances_6_virtualmachines_3_network_nics(
        self, method, url, body, headers):
    """Mock listing the NICs of VM 3."""
    payload = self.fixtures.load('vdc_4_vapp_6_vm_3_nics.xml')
    return (httplib.OK, payload, {}, '')
def _api_admin_datacenters(self, method, url, body, headers):
    """Mock listing the datacenters."""
    payload = self.fixtures.load('dcs.xml')
    return (httplib.OK, payload, {}, '')
def _api_admin_enterprises_1(self, method, url, body, headers):
    """Mock fetching enterprise 1."""
    payload = self.fixtures.load('ent_1.xml')
    return (httplib.OK, payload, {}, '')
def _api_admin_enterprises_1_datacenterrepositories(
        self, method, url, body, headers):
    """Mock the datacenter repositories of enterprise 1, per caller."""
    auth = headers['Authorization']
    if auth == 'Basic Z286dHJ1bmtz':
        # 'go:trunks': repositories not found.
        return (httplib.NOT_FOUND,
                self.fixtures.load('not_found_error.xml'), {}, '')
    if auth == 'Basic c29uOmdvaGFu':
        # 'son:gohan': privilege error.
        return (httplib.FORBIDDEN,
                self.fixtures.load('privilege_errors.html'), {}, '')
    # Every other user (including the common 'son:goku') gets the list.
    return (httplib.OK, self.fixtures.load('ent_1_dcreps.xml'), {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2(
        self, method, url, body, headers):
    """Mock fetching datacenter repository 2."""
    payload = self.fixtures.load('ent_1_dcrep_2.xml')
    return (httplib.OK, payload, {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates(
        self, method, url, body, headers):
    """Mock listing the VM templates of repository 2."""
    payload = self.fixtures.load('ent_1_dcrep_2_templates.xml')
    return (httplib.OK, payload, {}, '')
def _api_admin_enterprises_1_datacenterrepositories_2_virtualmachinetemplates_11(
        self, method, url, body, headers):
    """Mock fetching VM template 11 of repository 2."""
    payload = self.fixtures.load('ent_1_dcrep_2_template_11.xml')
    return (httplib.OK, payload, {}, '')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
| apache-2.0 |
ydre/kit-soft | ReceptionPython/LogParser.py | 6 | 2295 | #!/usr/bin/env python
import sys
import numpy as np
from operator import itemgetter, attrgetter
import Image
from math import *
try:
sys.argv[1]
except NameError:
startingpoint = 'Missing an arg'
else:
startingpoint = sys.argv[1]
i = 0
MegaLine = []
Tableau = []
BaseData = []
print BaseData
print "\n\n"
with open(startingpoint, 'r') as echOpenLog:
for line in echOpenLog:
if (i==4):
#print MegaLine
Tableau.append(MegaLine)
i=0
if (i==0):
line = line.split('\t')
del line[0]
del line[-1]
MegaLine = line
else:
line = line.split('\t')
del line[0]
del line[0]
del line[-1]
MegaLine += line
i=i+1
Tableau.append(MegaLine)
data = np.array(Tableau).astype(int)
col = 0
SortedTable = data[np.argsort(data[:,col])]
PointsPerLine = len(SortedTable[0])
NbOfPoints = len(SortedTable)
print PointsPerLine
print NbOfPoints
targetFile = open("debug.log", 'w')
moyenneLine =[]
FinReset = 200
moyenne = []
##On cree le fichier de debug.. et la moyenne sur les 200 derniers points
for x in range(NbOfPoints):
for y in range (PointsPerLine):
SortedTable[x][y] = int(SortedTable[x][y])
targetFile.write(str(SortedTable[x][y])+"\t")
if (y>PointsPerLine-FinReset):
moyenneLine.append(int(SortedTable[x][y]))
targetFile.write("\n")
##On retire le niveau sur les 200 derniers points pour moyenner
for x in range(NbOfPoints):
TMP_Mean = sum(moyenneLine) / float(len(moyenneLine))
for y in range (PointsPerLine):
SortedTable[x][y] = int(abs(float(SortedTable[x][y]) - TMP_Mean))
##On retire les echos initiaux
InitReset = 190
moyenne = []
for x in range(NbOfPoints):
for y in range (PointsPerLine):
if (y<InitReset):
moyenne.append(int(SortedTable[x][y]))
else:
moyenne.append(0)
moyenne[y] = int(moyenne[y]/InitReset)
size = (NbOfPoints,PointsPerLine)
print size
im = Image.new('RGB',size)
pix = im.load()
for i in range(size[0]):
for j in range(size[1]):
value = abs((int(SortedTable[i][j]))-moyenne[j])
SortedTable[i][j] = int(value **(1/1))
SortedTable = SortedTable[np.argsort(SortedTable[:,col])]
print SortedTable
for i in range(size[0]):
for j in range(size[1]):
value = int(SortedTable[i][j])
pix[i,j] = (value,value,value)
outfile = startingpoint +".png"
im.save(outfile)
| bsd-3-clause |
nvoron23/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/cp1251.py | 593 | 13617 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1251 codec driven by the charmap tables in this module."""

    def encode(self,input,errors='strict'):
        # Unicode -> cp1251 bytes via the inverted table.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # cp1251 bytes -> unicode via the decoding table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp1251 encoder; charmap codecs carry no state between calls."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length); only the bytes are needed.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp1251 decoder; charmap codecs carry no state between calls."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length); only the text is needed.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper; all encoding work is inherited from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper; all decoding work is inherited from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register the cp1251 codec."""
    # Codec is stateless, so one shared instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1251',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# One 256-character string: decoding_table[byte] is the unicode character
# that cp1251 byte decodes to. Written 16 characters per row (rows 0x00,
# 0x10, ... 0xF0) instead of one literal per character; the resulting
# string value is identical to gencodec.py's expanded form.
decoding_table = (
    u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'  # 0x00-0x0F: C0 controls
    u'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'  # 0x10-0x1F: C0 controls
    u' !"#$%&\'()*+,-./'                                                 # 0x20-0x2F
    u'0123456789:;<=>?'                                                  # 0x30-0x3F
    u'@ABCDEFGHIJKLMNO'                                                  # 0x40-0x4F
    u'PQRSTUVWXYZ[\\]^_'                                                 # 0x50-0x5F
    u'`abcdefghijklmno'                                                  # 0x60-0x6F
    u'pqrstuvwxyz{|}~\x7f'                                               # 0x70-0x7F
    u'\u0402\u0403\u201a\u0453\u201e\u2026\u2020\u2021'                  # 0x80-0x87
    u'\u20ac\u2030\u0409\u2039\u040a\u040c\u040b\u040f'                  # 0x88-0x8F (0x88 = EURO SIGN)
    u'\u0452\u2018\u2019\u201c\u201d\u2022\u2013\u2014'                  # 0x90-0x97
    u'\ufffe\u2122\u0459\u203a\u045a\u045c\u045b\u045f'                  # 0x98-0x9F (0x98 undefined)
    u'\xa0\u040e\u045e\u0408\xa4\u0490\xa6\xa7'                          # 0xA0-0xA7
    u'\u0401\xa9\u0404\xab\xac\xad\xae\u0407'                            # 0xA8-0xAF
    u'\xb0\xb1\u0406\u0456\u0491\xb5\xb6\xb7'                            # 0xB0-0xB7
    u'\u0451\u2116\u0454\xbb\u0458\u0405\u0455\u0457'                    # 0xB8-0xBF
    u'\u0410\u0411\u0412\u0413\u0414\u0415\u0416\u0417'                  # 0xC0-0xC7: А-З
    u'\u0418\u0419\u041a\u041b\u041c\u041d\u041e\u041f'                  # 0xC8-0xCF: И-П
    u'\u0420\u0421\u0422\u0423\u0424\u0425\u0426\u0427'                  # 0xD0-0xD7: Р-Ч
    u'\u0428\u0429\u042a\u042b\u042c\u042d\u042e\u042f'                  # 0xD8-0xDF: Ш-Я
    u'\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437'                  # 0xE0-0xE7: а-з
    u'\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f'                  # 0xE8-0xEF: и-п
    u'\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447'                  # 0xF0-0xF7: р-ч
    u'\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f'                  # 0xF8-0xFF: ш-я
)
### Encoding table
# Inverse mapping (unicode character -> cp1251 byte) built from the
# decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.