| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
from __future__ import print_function
import argparse
import sys
import time
parser = argparse.ArgumentParser(description="Removes potential deamination from vcf file")
#add options to argparser
parser.add_argument('-i', action="store", dest="vcf_input", type=str)
parser.add_argument('-o', action="store", dest="vcf_output", type=str)
#test parameters
try:
options=parser.parse_args()
except:
parser.print_help()
sys.exit(0)
vcf_input=options.vcf_input
vcf_output=options.vcf_output
outfile=open(vcf_output,'w')
def parser(i):
    """
    Removes potential deamination artifacts from a VCF line.
    Takes a SNP (one tab-split line of a VCF file) and, when it encounters
    a potential deamination artifact, replaces the affected genotypes with
    './.' (missing).
    """
    changed_snps = 0
    # C>T and G>A sites: mask genotypes carrying the alternate allele.
    if (i[3] == 'C' and i[4] == 'T') or (i[3] == 'G' and i[4] == 'A'):
        masked = ('0/1', '1/1')
    # T>C and A>G sites: mask genotypes carrying the reference allele.
    elif (i[3] == 'T' and i[4] == 'C') or (i[3] == 'A' and i[4] == 'G'):
        masked = ('0/1', '0/0')
    else:
        # if none of the above, just return the unchanged line
        return i
    newline = []
    for item in i:
        if item.startswith(masked):
            newline.append('./.')
            changed_snps += 1
        else:
            newline.append(item)
    # print(changed_snps)
    return newline
start = time.time()
#iterate through each line
counter=0
initial_vcf=[]
header=[]
with open(vcf_input,'r') as f:
for line in f:
#get header lines
if line.startswith('#'):
headline=line.strip('\n').split("\t")
header.append(headline)
outfile.write('\t'.join(headline) + '\n')
else:
counter += 1
#process snp lines with the parser function
snp=line.strip('\n').split("\t")
outfile.write('\t'.join(parser(snp)) + '\n')
sys.stdout.write("SNPs processed: %d \r" % (counter) )
sys.stdout.flush()
outfile.close()
end = time.time()
elapsed = end - start
print("\n" + str(round(elapsed,2)) + " sec.")
|
{
"content_hash": "b5c9235b8714acc1781f7bb748724c93",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 91,
"avg_line_length": 23.517857142857142,
"alnum_prop": 0.6476841305998481,
"repo_name": "ruidlpm/Utils",
"id": "05286d3a527cf876de721250e323fcbdbd52bfe2",
"size": "3071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filter_vcf_deamination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13297"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
}
|
import copy
class UnspecifiedError(Exception):
"""Base class for all exceptions in docker_registry."""
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message', 'No details')
super(UnspecifiedError, self).__init__(*args, **kwargs)
class UsageError(UnspecifiedError):
"""Exceptions related to use of the library.
Missing files, wrong argument type, etc.
"""
class NotImplementedError(UsageError):
"""The requested feature is not supported / not implemented."""
class FileNotFoundError(UsageError):
"""The requested (config) file is missing."""
class WrongArgumentsError(UsageError):
"""Expected arguments type not satisfied."""
class ConfigError(UsageError):
"""The provided configuration has problems."""
class ConnectionError(UnspecifiedError):
"""Network communication related errors all inherit this."""
class UnreachableError(ConnectionError):
"""The requested server is not reachable."""
class MissingError(ConnectionError):
"""The requested ressource is not to be found on the server."""
class BrokenError(ConnectionError):
"""Something died on our hands, that the server couldn't digest..."""
def merge_dicts(a, b, raise_conflicts=False, path=None):
"""
Merges the values of B into A.
If the raise_conflicts flag is set to True, a LookupError will be raised if the keys are conflicting.
:param a: the target dictionary
:param b: the dictionary to import
:param raise_conflicts: flag to raise an exception if two keys are colliding
:param path: the dictionary path. Used to show where the keys are conflicting when an exception is raised.
:return: The dictionary A with the values of the dictionary B merged into it.
"""
# Set path.
if path is None:
path = []
# Go through the keys of the 2 dictionaries.
for key in b:
# If the key exist in both dictionary, check whether we must update or not.
if key in a:
# Dig deeper for keys that have dictionary values.
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], raise_conflicts=raise_conflicts, path=(path + [str(key)]))
# Skip the identical values.
elif a[key] == b[key]:
pass
else:
# Otherwise raise an error if the same keys have different values.
if raise_conflicts:
raise LookupError("Conflict at '{path}'".format(path='.'.join(path + [str(key)])))
# Or replace the value of A with the value of B.
a[key] = b[key]
else:
# If the key does not exist in A, import it.
a[key] = copy.deepcopy(b[key]) if isinstance(b[key], dict) else b[key]
return a
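# A minimal usage sketch for merge_dicts (the dictionaries are made up):
#
#   a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
#   b = {'db': {'name': 'registry', 'port': 5432}, 'cache': True}
#   merge_dicts(a, b)
#   # -> a now also contains db.name and the top-level 'cache' key, while
#   #    identical values ('port') are left untouched.
#
# With raise_conflicts=True, a differing value (e.g. b = {'db': {'port': 5433}})
# raises LookupError("Conflict at 'db.port'").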
|
{
"content_hash": "3ea89cffac1ff7ae096bb75af6bdfc51",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 110,
"avg_line_length": 31.921348314606742,
"alnum_prop": 0.6448433650123196,
"repo_name": "rgreinho/saliere",
"id": "9059eace99c26f8531ddd78d9019d84c08b583ee",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saliere/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "270"
},
{
"name": "Python",
"bytes": "25295"
},
{
"name": "Ruby",
"bytes": "2347"
},
{
"name": "SaltStack",
"bytes": "281"
},
{
"name": "Shell",
"bytes": "2254"
}
],
"symlink_target": ""
}
|
"""
TODO
"""
import sys
import logging
import numpy
import numpy as np
from theano import function, shared
from theano import tensor as TT
import theano
sharedX = lambda X, name : \
shared(numpy.asarray(X, dtype=theano.config.floatX), name=name)
def kinetic_energy(vel):
"""
Returns the kinetic energy associated with the given velocity and mass of 1.
Parameters
----------
vel: theano matrix
Symbolic matrix whose rows are velocity vectors.
Returns
-------
return: theano vector
        Vector whose i-th entry is the kinetic energy associated with vel[i].
"""
return 0.5 * (vel**2).sum(axis=1)
def hamiltonian(pos, vel, energy_fn):
"""
Returns the Hamiltonian (sum of potential and kinetic energy) for the given
velocity and position.
Parameters
----------
pos: theano matrix
Symbolic matrix whose rows are position vectors.
vel: theano matrix
Symbolic matrix whose rows are velocity vectors.
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
Returns
-------
return: theano vector
Vector whose i-th entry is the Hamiltonian at position pos[i] and
velocity vel[i].
"""
# assuming mass is 1
return energy_fn(pos) + kinetic_energy(vel)
def metropolis_hastings_accept(energy_prev, energy_next, s_rng):
"""
Performs a Metropolis-Hastings accept-reject move.
Parameters
----------
energy_prev: theano vector
Symbolic theano tensor which contains the energy associated with the
configuration at time-step t.
energy_next: theano vector
Symbolic theano tensor which contains the energy associated with the
proposed configuration at time-step t+1.
s_rng: theano.tensor.shared_randomstreams.RandomStreams
Theano shared random stream object used to generate the random number
used in proposal.
Returns
-------
return: boolean
True if move is accepted, False otherwise
"""
ediff = energy_prev - energy_next
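    # Accept iff u < exp(energy_prev - energy_next) for u ~ Uniform(0, 1):
    # the standard Metropolis-Hastings rule, i.e. accept with probability
    # min(1, exp(energy_prev - energy_next)).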
return (TT.exp(ediff) - s_rng.uniform(size=energy_prev.shape)) >= 0
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
"""
Return final (position, velocity) obtained after an `n_steps` leapfrog
updates, using Hamiltonian dynamics.
Parameters
----------
initial_pos: shared theano matrix
Initial position at which to start the simulation
initial_vel: shared theano matrix
Initial velocity of particles
stepsize: shared theano scalar
Scalar value controlling amount by which to move
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
Returns
-------
rval1: theano matrix
Final positions obtained after simulation
rval2: theano matrix
Final velocity obtained after simulation
"""
def leapfrog(pos, vel, step):
"""
Inside loop of Scan. Performs one step of leapfrog update, using
Hamiltonian dynamics.
Parameters
----------
pos: theano matrix
in leapfrog update equations, represents pos(t), position at time t
vel: theano matrix
in leapfrog update equations, represents vel(t - stepsize/2),
velocity at time (t - stepsize/2)
step: theano scalar
scalar value controlling amount by which to move
Returns
-------
rval1: [theano matrix, theano matrix]
Symbolic theano matrices for new position pos(t + stepsize), and
velocity vel(t + stepsize/2)
rval2: dictionary
Dictionary of updates for the Scan Op
"""
# from pos(t) and vel(t-stepsize/2), compute vel(t+stepsize/2)
dE_dpos = TT.grad(energy_fn(pos).sum(), pos)
new_vel = vel - step * dE_dpos
# from vel(t+stepsize/2) compute pos(t+stepsize)
new_pos = pos + step * new_vel
return [new_pos, new_vel],{}
# compute velocity at time-step: t + stepsize/2
initial_energy = energy_fn(initial_pos)
dE_dpos = TT.grad(initial_energy.sum(), initial_pos)
vel_half_step = initial_vel - 0.5*stepsize*dE_dpos
# compute position at time-step: t + stepsize
pos_full_step = initial_pos + stepsize * vel_half_step
# perform leapfrog updates: the scan op is used to repeatedly compute
# vel(t + (m-1/2)*stepsize) and pos(t + m*stepsize) for m in [2,n_steps].
(final_pos, final_vel), scan_updates = theano.scan(leapfrog,
outputs_info=[
dict(initial=pos_full_step, return_steps=1),
dict(initial=vel_half_step, return_steps=1),
],
non_sequences=[stepsize],
n_steps=n_steps-1)
# The last velocity returned by scan is vel(t + (n_steps-1/2)*stepsize)
# We therefore perform one more half-step to return vel(t + n_steps*stepsize)
energy = energy_fn(final_pos)
final_vel = final_vel - 0.5 * stepsize * TT.grad(energy.sum(), final_pos)
# return new proposal state
return final_pos, final_vel
def hmc_move(s_rng, positions, energy_fn, stepsize, n_steps):
"""
This function performs one-step of Hybrid Monte-Carlo sampling. We start by
sampling a random velocity from a univariate Gaussian distribution, perform
`n_steps` leap-frog updates using Hamiltonian dynamics and accept-reject
using Metropolis-Hastings.
Parameters
----------
s_rng: theano shared random stream
Symbolic random number generator used to draw random velocity and
perform accept-reject move.
positions: shared theano matrix
Symbolic matrix whose rows are position vectors.
energy_fn: python function
Python function, operating on symbolic theano variables, used to compute
the potential energy at a given position.
stepsize: shared theano scalar
Shared variable containing the stepsize to use for `n_steps` of HMC
simulation steps.
n_steps: integer
Number of HMC steps to perform before proposing a new position.
Returns
-------
rval1: boolean
True if move is accepted, False otherwise
rval2: theano matrix
Matrix whose rows contain the proposed "new position"
"""
# sample random velocity
initial_vel = s_rng.normal(size=positions.shape)
# perform simulation of particles subject to Hamiltonian dynamics
final_pos, final_vel = simulate_dynamics(
initial_pos = positions,
initial_vel = initial_vel,
stepsize = stepsize,
n_steps = n_steps,
energy_fn = energy_fn)
# accept/reject the proposed move based on the joint distribution
accept = metropolis_hastings_accept(
energy_prev = hamiltonian(positions, initial_vel, energy_fn),
energy_next = hamiltonian(final_pos, final_vel, energy_fn),
s_rng=s_rng)
return accept, final_pos
def hmc_updates(positions, stepsize, avg_acceptance_rate, final_pos, accept,
target_acceptance_rate, stepsize_inc, stepsize_dec,
stepsize_min, stepsize_max, avg_acceptance_slowness):
"""
This function is executed after `n_steps` of HMC sampling (`hmc_move`
function). It creates the updates dictionary used by the `simulate`
function. It takes care of updating: the position (if the move is accepted),
the stepsize (to track a given target acceptance rate) and the average
acceptance rate (computed as a moving average).
Parameters
----------
positions: shared variable, theano matrix
Shared theano matrix whose rows contain the old position
stepsize: shared variable, theano scalar
Shared theano scalar containing current step size
avg_acceptance_rate: shared variable, theano scalar
Shared theano scalar containing the current average acceptance rate
final_pos: shared variable, theano matrix
Shared theano matrix whose rows contain the new position
accept: theano scalar
Boolean-type variable representing whether or not the proposed HMC move
should be accepted or not.
target_acceptance_rate: float
The stepsize is modified in order to track this target acceptance rate.
stepsize_inc: float
Amount by which to increment stepsize when acceptance rate is too high.
stepsize_dec: float
Amount by which to decrement stepsize when acceptance rate is too low.
stepsize_min: float
Lower-bound on `stepsize`.
    stepsize_max: float
Upper-bound on `stepsize`.
avg_acceptance_slowness: float
Average acceptance rate is computed as an exponential moving average.
(1-avg_acceptance_slowness) is the weight given to the newest
observation.
Returns
-------
rval1: dictionary-like
A dictionary of updates to be used by the `HMC_Sampler.simulate`
function. The updates target the position, stepsize and average
acceptance rate.
"""
## POSITION UPDATES ##
# broadcast `accept` scalar to tensor with the same dimensions as final_pos.
accept_matrix = accept.dimshuffle(0, *(('x',)*(final_pos.ndim-1)))
# if accept is True, update to `final_pos` else stay put
new_positions = TT.switch(accept_matrix, final_pos, positions)
## STEPSIZE UPDATES ##
# if acceptance rate is too low, our sampler is too "noisy" and we reduce
# the stepsize. If it is too high, our sampler is too conservative, we can
# get away with a larger stepsize (resulting in better mixing).
_new_stepsize = TT.switch(avg_acceptance_rate > target_acceptance_rate,
stepsize * stepsize_inc, stepsize * stepsize_dec)
# maintain stepsize in [stepsize_min, stepsize_max]
new_stepsize = TT.clip(_new_stepsize, stepsize_min, stepsize_max)
## ACCEPT RATE UPDATES ##
# perform exponential moving average
new_acceptance_rate = TT.add(
avg_acceptance_slowness * avg_acceptance_rate,
(1.0 - avg_acceptance_slowness) * accept.mean())
return [(positions, new_positions),
(stepsize, new_stepsize),
(avg_acceptance_rate, new_acceptance_rate)]
class HMC_sampler(object):
"""
Convenience wrapper for performing Hybrid Monte Carlo (HMC). It creates the
symbolic graph for performing an HMC simulation (using `hmc_move` and
`hmc_updates`). The graph is then compiled into the `simulate` function, a
theano function which runs the simulation and updates the required shared
variables.
    Users should interface with the sampler through the `draw` function, which
    advances the Markov chain and returns the current sample by calling
`simulate` and `get_position` in sequence.
The hyper-parameters are the same as those used by Marc'Aurelio's
'train_mcRBM.py' file (available on his personal home page).
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@classmethod
def new_from_shared_positions(cls, shared_positions, energy_fn,
initial_stepsize=0.01, target_acceptance_rate=.9, n_steps=20,
stepsize_dec = 0.98,
stepsize_min = 0.001,
stepsize_max = 0.25,
stepsize_inc = 1.02,
avg_acceptance_slowness = 0.9, # used in geometric avg. 1.0 would be not moving at all
seed=12345):
"""
:param shared_positions: theano ndarray shared var with many particle [initial] positions
:param energy_fn:
callable such that energy_fn(positions)
returns theano vector of energies.
The len of this vector is the batchsize.
The sum of this energy vector must be differentiable (with theano.tensor.grad) with
respect to the positions for HMC sampling to work.
"""
batchsize = shared_positions.shape[0]
# allocate shared variables
stepsize = sharedX(initial_stepsize, 'hmc_stepsize')
avg_acceptance_rate = sharedX(target_acceptance_rate, 'avg_acceptance_rate')
s_rng = TT.shared_randomstreams.RandomStreams(seed)
# define graph for an `n_steps` HMC simulation
accept, final_pos = hmc_move(
s_rng,
shared_positions,
energy_fn,
stepsize,
n_steps)
# define the dictionary of updates, to apply on every `simulate` call
simulate_updates = hmc_updates(
shared_positions,
stepsize,
avg_acceptance_rate,
final_pos=final_pos,
accept=accept,
stepsize_min=stepsize_min,
stepsize_max=stepsize_max,
stepsize_inc=stepsize_inc,
stepsize_dec=stepsize_dec,
target_acceptance_rate=target_acceptance_rate,
avg_acceptance_slowness=avg_acceptance_slowness)
# compile theano function
simulate = function([], [], updates=simulate_updates)
# create HMC_sampler object with the following attributes ...
return cls(
positions=shared_positions,
stepsize=stepsize,
stepsize_min=stepsize_min,
stepsize_max=stepsize_max,
avg_acceptance_rate=avg_acceptance_rate,
target_acceptance_rate=target_acceptance_rate,
s_rng=s_rng,
_updates=simulate_updates,
simulate=simulate)
def draw(self, **kwargs):
"""
Returns a new position obtained after `n_steps` of HMC simulation.
Parameters
----------
kwargs: dictionary
The `kwargs` dictionary is passed to the shared variable
(self.positions) `get_value()` function. For example, to avoid
copying the shared variable value, consider passing `borrow=True`.
Returns
-------
rval: numpy matrix
            Numpy matrix whose dimensions are similar to `initial_position`.
"""
self.simulate()
        return self.positions.get_value(**kwargs)
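# A minimal usage sketch (assumes Theano is installed; the energy function and
# sizes below are illustrative, not part of the original module): sample from
# an isotropic 2-D Gaussian with HMC_sampler.
#
#   import numpy, theano
#
#   def gaussian_energy(pos):
#       # negative log-density of N(0, I), up to an additive constant
#       return 0.5 * (pos ** 2).sum(axis=1)
#
#   positions = theano.shared(
#       numpy.zeros((100, 2), dtype=theano.config.floatX))
#   sampler = HMC_sampler.new_from_shared_positions(
#       positions, gaussian_energy, initial_stepsize=0.01, n_steps=20)
#   samples = [sampler.draw() for _ in range(50)]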
|
{
"content_hash": "6a27c91bc822adac9b49bbd96d2701f9",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 98,
"avg_line_length": 37.23650385604113,
"alnum_prop": 0.6405246807041768,
"repo_name": "NoSRPKU/GradD",
"id": "4e719ff9e6fc04689dd4a953b0531c85b573e3a1",
"size": "14485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codes/mcrbm/hmc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2676"
},
{
"name": "Python",
"bytes": "346567"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
import os
import csv
logger = logging.getLogger(__name__)
class CERerankingEvaluator:
"""
This class evaluates a CrossEncoder model for the task of re-ranking.
Given a query and a list of documents, it computes the score [query, doc_i] for all possible
    documents and sorts them in decreasing order. Then, MRR@10 is computed to measure the quality of the ranking.
:param samples: Must be a list and each element is of the form: {'query': '', 'positive': [], 'negative': []}. Query is the search query,
positive is a list of positive (relevant) documents, negative is a list of negative (irrelevant) documents.
"""
def __init__(self, samples, mrr_at_k: int = 10, name: str = '', write_csv: bool = True):
self.samples = samples
self.name = name
self.mrr_at_k = mrr_at_k
if isinstance(self.samples, dict):
self.samples = list(self.samples.values())
self.csv_file = "CERerankingEvaluator" + ("_" + name if name else '') + "_results.csv"
self.csv_headers = ["epoch", "steps", "MRR@{}".format(mrr_at_k)]
self.write_csv = write_csv
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CERerankingEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
all_mrr_scores = []
num_queries = 0
num_positives = []
num_negatives = []
for instance in self.samples:
query = instance['query']
positive = list(instance['positive'])
negative = list(instance['negative'])
docs = positive + negative
is_relevant = [True]*len(positive) + [False]*len(negative)
if len(positive) == 0 or len(negative) == 0:
continue
num_queries += 1
num_positives.append(len(positive))
num_negatives.append(len(negative))
model_input = [[query, doc] for doc in docs]
pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
pred_scores_argsort = np.argsort(-pred_scores) #Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0:self.mrr_at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank+1)
break
all_mrr_scores.append(mrr_score)
mean_mrr = np.mean(all_mrr_scores)
logger.info("Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}".format(num_queries, np.min(num_positives), np.mean(num_positives), np.max(num_positives), np.min(num_negatives), np.mean(num_negatives), np.max(num_negatives)))
logger.info("MRR@{}: {:.2f}".format(self.mrr_at_k, mean_mrr*100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mean_mrr])
return mean_mrr
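# A minimal usage sketch (the model name and example texts are illustrative,
# not part of this module):
#
#   from sentence_transformers import CrossEncoder
#
#   samples = [{'query': 'capital of france',
#               'positive': ['Paris is the capital of France.'],
#               'negative': ['Berlin is the capital of Germany.',
#                            'France borders Spain.']}]
#   evaluator = CERerankingEvaluator(samples, mrr_at_k=10, name='dev')
#   model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
#   mean_mrr = evaluator(model, output_path='output/')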
|
{
"content_hash": "67d9f4fba9b76a69bcd29adcf1733c43",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 294,
"avg_line_length": 42.752941176470586,
"alnum_prop": 0.5784259768849752,
"repo_name": "UKPLab/sentence-transformers",
"id": "e2f47ff10f6da4c1c428a1c4e07737859f1716ff",
"size": "3634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentence_transformers/cross_encoder/evaluation/CERerankingEvaluator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "342520"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rds20140815DescribeModifyParameterLogRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceId = None
self.EndTime = None
self.PageNumber = None
self.PageSize = None
self.StartTime = None
def getapiname(self):
return 'rds.aliyuncs.com.DescribeModifyParameterLog.2014-08-15'
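# A minimal usage sketch (the instance id and time window are placeholders;
# how the request is ultimately executed depends on the surrounding
# aliyun.api RestApi framework):
#
#   req = Rds20140815DescribeModifyParameterLogRequest()
#   req.DBInstanceId = 'rm-xxxxxxxx'
#   req.StartTime = '2015-06-01T00:00Z'
#   req.EndTime = '2015-06-23T00:00Z'
#   req.PageSize = 30
#   req.PageNumber = 1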
|
{
"content_hash": "6e62baec422473a175768a0c8f8e7896",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 29.533333333333335,
"alnum_prop": 0.7471783295711061,
"repo_name": "francisar/rds_manager",
"id": "18b4e41f2dc4a96d2316fee4a4a1694e338e8c3f",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aliyun/api/rest/Rds20140815DescribeModifyParameterLogRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259509"
},
{
"name": "Shell",
"bytes": "1481"
}
],
"symlink_target": ""
}
|
""" The crypto.hash package.
Part of the CryptoPy framework.
"""
|
{
"content_hash": "c5d9b7e8a86ce901d73e34f8c7738a2c",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 23,
"alnum_prop": 0.6666666666666666,
"repo_name": "mrknow/filmkodi",
"id": "4fc0e5e31470a40734c4fd495b62dbcdd289a739",
"size": "98",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "plugin.video.mrknow/resources/lib/crypto/hash/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
}
|
import subprocess
import numpy as np
import pandas as pd
import itertools
from sklearn.metrics import log_loss
import avito2_io
import pdb
BASE = avito2_io.BASE
DATA = avito2_io.DATA
VWDATA = BASE + 'vwdata/'
TMP = BASE + 'tmp/'
SUBMISSION = BASE + '/submissions/submission_%d.csv'
SAMPLE = DATA + '/sampleSubmission.csv'
def sigmoid(x):
return 1./(1. + np.exp(-x))
def inv_sigmoid(x):
return np.log(x/(1.-x))
def vw_train(data_file,
l2=None,
l1=None,
keep=None,
ignore=None,
quadratic=None,
passes=None,
model_file=None,
learn_rate=0.1,
holdout=False,
other=None):
"""
Function programmatically calls VW for training.
Optionally writes out the model file containing the learned weights.
Params:
data_file - the training data in VW's input format
l2 - (optional) the L2 regularization parameter
l1 - (optional) the L1 regularization parameter
NOTE: In VW, this is per row, so it should be small ~ 1e-7
keep - (optional) string with the first letters of namespaces to use,
others are ignored. Default is use all namespaces.
ignore - (optional) string with the first letters of the namespaces to ignore.
VW uses all of the others. Default is ignore none/use all.
NOTE: At most one of keep and ignore can be specified.
quadratic - string with first letters of all namespaces that should be crossed
to make quadratic terms. Uses all pairs of these.
passes - (optional) the number of passes to use. Default is 1.
model_file - (optional) A file to write the final learned models out to.
If not specified, training is run, and there is output,
but no model is saved.
    learn_rate - default 0.1
    holdout - default False. If True, use VW defaults for holdout;
              if False, holdout is disabled (--holdout_off).
other - A list of strings to pass to VW as command line arguments
Returns:
nothing, but writes out the final regressor at <model_file>
"""
cmdline = ['vw',
'-d', data_file,
'-b', '26',
'-l', str(learn_rate),
'--loss_function', 'logistic',
'--progress', '1000000']
if l2 is not None:
cmdline.extend(['--l2', str(l2)])
if l1 is not None:
cmdline.extend(['--l1', str(l1)])
if passes is not None:
cmdline.extend(['--passes', str(passes), '-c'])
if model_file is not None:
cmdline.extend(['-f', model_file])
if keep is not None and ignore is not None:
raise ValueError('At most one of --keep and --ignore can be specified.')
if keep is not None:
arg = '--keep'
names = keep
if ignore is not None:
arg = '--ignore'
names = ignore
if keep is not None or ignore is not None:
for n in names:
cmdline.extend([arg, n])
if quadratic is not None:
for (a,b) in itertools.combinations(quadratic, 2):
cmdline.extend(['-q', a+b])
if not holdout:
cmdline.append('--holdout_off')
if other is not None:
cmdline.extend(other)
subprocess.call(cmdline)
def vw_predict(model_file, test_data, outfile=None):
"""
Function programmatically calls VW.
Optionally saves the output in VW's output format:
<prediction> <tag>
    where <prediction> will be on the log-odds scale.
Params:
model_file - the file resulting from a VW training run
test_data - the data to predict on in VW input format
outfile - Optional. VW will write results here.
"""
cmdline = ['vw',
'-t', test_data,
'-i', model_file,
'--loss_function', 'logistic',
'--progress', '1000000']
if outfile is not None:
cmdline.extend(['-p', outfile])
subprocess.call(cmdline)
def score_val(infile, offset=0):
"""
Takes a file with the format <prediction> <label> where the label
is -1/1 and the predictions are in log-odds form and outputs the
log loss for those predictions and labels. The format is
what VW outputs if the labels are the tag for the training set.
Params:
infile - path to the prediction file
offset - an amount added to the log-odds of each prediction
before computing probabilities
Returns:
the log loss for the predictions and labels in <infile>
"""
data = read_vw_results(infile, offset)
# For validation, the tag contains labels, not id's
labels = (data['ID'] == 1).astype(float)
return log_loss(labels.values, data.IsClick.values)
def write_vw_submission(submit_num, infile1, infile2=None, offset1=0, offset2=0):
"""
Takes a file with the format <prediction> <ID> where the prediction
is in log-odds form. Writes out a file suitable for submitting.
Params:
submit_num - output is submission_<submit_num>.csv
infile1 - path to the file with test set predictions
offset1 - an amount added to the log-odds of each prediction
from infile1 before computing probabilities
infile2 - path to a file with more test set predictions
offset2 - an offset to apply to log-odds values in infile2
"""
out = read_vw_results(infile1, offset1)
submit_file = SUBMISSION % submit_num
out[['ID','IsClick']].to_csv(submit_file, index=False)
if infile2 is not None:
out = read_vw_results(infile2, offset2)
out[['ID','IsClick']].to_csv(submit_file, index=False, header=None, mode='a')
print 'wrote ' + submit_file
def read_vw_results(infile, offset):
"""
Factors out common code for accessing data in WV output format,
which is <prediction> <tag>. Tag can be an example ID or a label.
Params:
infile - path to the file with test set predictions
offset - an amount added to the log-odds of each prediction
from infile1 before computing probabilities
"""
data = pd.read_csv(infile, delimiter=' ', header=None)
pred = data[0]
pred += offset
probs = sigmoid(pred)
out = pd.DataFrame({'ID':data[1],'IsClick':probs})
return out
def combine(submissions, weights=None, logspace=True):
"""
Params:
submissions - a list of pandas data frames containing submissions.
The ID's need not be ordered concordantly.
    weights - Sequence or array of weights for averaging the submissions.
Optional, default uniform. Weights will be normalized to sum to one.
Converted to np.array of dtype float.
logspace - default True. Take the average on the log-odds scale.
Returns:
a pandas data frame that can be submitted in which the IsClick field
is the weighted average of the IsClick field in <submissions>.
"""
if weights is None:
weights = np.ones(len(submissions))
else:
weights = np.array(weights, dtype=float)
weights = weights/np.sum(weights)
ss = pd.read_csv(SAMPLE)
ss.rename(columns={'IsClick':'total'}, inplace=True)
ss.total = 0.0
for (w, sub) in zip(weights, submissions):
ss = ss.merge(sub, on='ID')
if logspace:
ss.total = ss.total + w * ss.IsClick.apply(inv_sigmoid)
else:
ss.total = ss.total + w * ss.IsClick
del ss['IsClick']
if logspace:
ss.total = ss.total.apply(sigmoid)
ss.rename(columns={'total':'IsClick'}, inplace=True)
return ss
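# A minimal end-to-end sketch (file names and the submission number are
# hypothetical; assumes the vw binary is on the PATH):
#
#   vw_train('train.vw', l2=1e-6, passes=3, quadratic='ab',
#            model_file='model.vw', learn_rate=0.1)
#   vw_predict('model.vw', 'val.vw', outfile='val_preds.txt')
#   print score_val('val_preds.txt')      # log loss on the validation set
#   vw_predict('model.vw', 'test.vw', outfile='test_preds.txt')
#   write_vw_submission(1, 'test_preds.txt')
#
#   # Blending two existing submissions on the log-odds scale:
#   blend = combine([pd.read_csv(SUBMISSION % 1), pd.read_csv(SUBMISSION % 2)],
#                   weights=[0.7, 0.3])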
|
{
"content_hash": "72d4e0d633c614ccc6ccb683653edbd1",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 82,
"avg_line_length": 31.847161572052403,
"alnum_prop": 0.6492527080762375,
"repo_name": "davidthaler/Kaggle_Avito-2015",
"id": "6ab6360982059d5a6e9730379e3b51df002f1b77",
"size": "7293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99765"
}
],
"symlink_target": ""
}
|
import grpc
from google.cloud.language_v1.proto import (
language_service_pb2 as google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2,
)
class LanguageServiceStub(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AnalyzeSentiment = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeSentiment",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSentimentResponse.FromString,
)
self.AnalyzeEntities = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeEntities",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitiesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitiesResponse.FromString,
)
self.AnalyzeEntitySentiment = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeEntitySentiment",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentResponse.FromString,
)
self.AnalyzeSyntax = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnalyzeSyntax",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSyntaxRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSyntaxResponse.FromString,
)
self.ClassifyText = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/ClassifyText",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.ClassifyTextRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.ClassifyTextResponse.FromString,
)
self.AnnotateText = channel.unary_unary(
"/google.cloud.language.v1.LanguageService/AnnotateText",
request_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnnotateTextRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnnotateTextResponse.FromString,
)
class LanguageServiceServicer(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def AnalyzeSentiment(self, request, context):
"""Analyzes the sentiment of the provided text.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AnalyzeEntities(self, request, context):
"""Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AnalyzeEntitySentiment(self, request, context):
"""Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
sentiment associated with each entity and its mentions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AnalyzeSyntax(self, request, context):
"""Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ClassifyText(self, request, context):
"""Classifies a document into categories.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def AnnotateText(self, request, context):
"""A convenience method that provides all the features that analyzeSentiment,
analyzeEntities, and analyzeSyntax provide in one call.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_LanguageServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"AnalyzeSentiment": grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSentiment,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSentimentResponse.SerializeToString,
),
"AnalyzeEntities": grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntities,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitiesRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitiesResponse.SerializeToString,
),
"AnalyzeEntitySentiment": grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntitySentiment,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeEntitySentimentResponse.SerializeToString,
),
"AnalyzeSyntax": grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSyntax,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSyntaxRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnalyzeSyntaxResponse.SerializeToString,
),
"ClassifyText": grpc.unary_unary_rpc_method_handler(
servicer.ClassifyText,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.ClassifyTextRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.ClassifyTextResponse.SerializeToString,
),
"AnnotateText": grpc.unary_unary_rpc_method_handler(
servicer.AnnotateText,
request_deserializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnnotateTextRequest.FromString,
response_serializer=google_dot_cloud_dot_language__v1_dot_proto_dot_language__service__pb2.AnnotateTextResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.language.v1.LanguageService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
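# A minimal client-side sketch (endpoint and document text are illustrative;
# real calls also require valid Google Cloud credentials attached to the
# channel or call):
#
#   import grpc
#   from google.cloud.language_v1.proto import language_service_pb2
#
#   channel = grpc.secure_channel('language.googleapis.com:443',
#                                 grpc.ssl_channel_credentials())
#   stub = LanguageServiceStub(channel)
#   document = language_service_pb2.Document(
#       content='I love this product!',
#       type=language_service_pb2.Document.PLAIN_TEXT)
#   response = stub.AnalyzeSentiment(
#       language_service_pb2.AnalyzeSentimentRequest(document=document))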
|
{
"content_hash": "ee7893452ebf3abc4c3d077a2c65747e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 152,
"avg_line_length": 56.212765957446805,
"alnum_prop": 0.7209184960888216,
"repo_name": "dhermes/gcloud-python",
"id": "40a7da3057736fb489766a16d0fc4a9b42bd9eb6",
"size": "7996",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "language/google/cloud/language_v1/proto/language_service_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
"""Enable periodic transmission of DB and job-produced content to BigQuery."""
__author__ = [
'Michael Gainer (mgainer@google.com)',
]
import base64
import collections
import copy
import datetime
import logging
import os
import random
import re
import sys
import traceback
import apiclient
import httplib2
import oauth2client
from common import catch_and_log
from common import crypto
from common import schema_fields
from common import utils as common_utils
from controllers import sites
from controllers import utils
from models import analytics
from models import courses
from models import custom_modules
from models import data_sources
from models import jobs
from models import roles
from models import transforms
from modules.dashboard import dashboard
from modules.dashboard import tabs
from google.appengine.ext import db
from google.appengine.ext import deferred
# CourseBuilder setup strings
XSRF_ACTION_NAME = 'data_pump'
DASHBOARD_ACTION = 'data_pump'
# Separate permission to be able to push user data delegable to non-super-users
ACCESS_PERMISSION = 'push_data'
ACCESS_PERMISSION_DESCRIPTION = 'Can push user data outside CourseBuilder.'
# Connection parameters for discovering and auth to BigQuery.
BIGQUERY_RW_SCOPE = 'https://www.googleapis.com/auth/bigquery'
BIGQUERY_API_NAME = 'bigquery'
BIGQUERY_API_VERSION = 'v2'
# API endpoint for initiating a retryable upload.
BIGQUERY_API_UPLOAD_URL_PREFIX = (
'https://www.googleapis.com/upload/bigquery/v2/projects/')
# UI for BigQuery interactive queries
BIGQUERY_UI_URL_PREFIX = 'https://bigquery.cloud.google.com/table/'
# Max of about 20 min of retries (random exponential backoff from 2^1...2^MAX)
MAX_CONSECUTIVE_FAILURES = 10
MAX_RETRY_BACKOFF_SECONDS = 600
# Config for secret
PII_SECRET_LENGTH = 20
PII_SECRET_DEFAULT_LIFETIME = '30 days'
# Constants for accessing job context settings map
UPLOAD_URL = 'upload_url'
LAST_START_OFFSET = 'last_start_offset'
LAST_END_OFFSET = 'last_end_offset'
LAST_PAGE_SENT = 'last_page_sent'
LAST_PAGE_NUM_ITEMS = 'last_page_num_items'
CONSECUTIVE_FAILURES = 'consecutive_failures'
FAILURE_REASON = 'failure_reason'
ITEMS_UPLOADED = 'items_uploaded'
PII_SECRET = 'pii_secret'
# Constants for items within course settings schema
DATA_PUMP_SETTINGS_SCHEMA_SECTION = 'data_pump'
PROJECT_ID = 'project_id'
DATASET_NAME = 'dataset_name'
JSON_KEY = 'json_key'
TABLE_LIFETIME = 'table_lifetime'
PII_ENCRYPTION_TOKEN = 'pii_encryption_token'
def _get_data_source_class_by_name(name):
source_classes = data_sources.Registry.get_rest_data_source_classes()
for source_class in source_classes:
if source_class.__name__ == name and source_class.exportable():
return source_class
return None
class DataPumpJob(jobs.DurableJobBase):
@staticmethod
def get_description():
"""Job to push data from CourseBuilder to BigQuery.
The job operates from the deferred queue, and takes advantage of the
underlying TaskQueue retry and backoff support. One job is created
for each DataSource (see models/data_source). This job moves data
from the paginated data source up to Google BigQuery via the
retryable POST method.
        Jobs here run on the TaskQueue named "default" along with all other
CB deferred tasks because that queue has a reasonable set of config
parameters. However, there is nothing about these jobs that
requires interleaving with others if queue parameters need to be
tuned. Functional tests will need to be changed to have
execute_all_deferred_tasks() pass the name of the new queue.
"""
def __init__(self, app_context, data_source_class_name):
if not _get_data_source_class_by_name(data_source_class_name):
raise ValueError(
'No such data source "%s", or data source is not marked '
'as exportable.' % data_source_class_name)
super(DataPumpJob, self).__init__(app_context)
self._data_source_class_name = data_source_class_name
self._job_name = 'job-datapump-%s-%s' % (self._data_source_class_name,
self._namespace)
def non_transactional_submit(self):
"""Callback used when UI gesture indicates this job should start."""
sequence_num = super(DataPumpJob, self).non_transactional_submit()
deferred.defer(self.main, sequence_num)
return sequence_num
def _mark_job_canceled(self, job, message, duration):
"""Override default behavior of setting job.output to error string."""
if job.output:
job_context, data_source_context = self._load_state(
job, job.sequence_num)
else:
job_context = self._build_job_context(None, None)
data_source_context = self._build_data_source_context()
job_context[FAILURE_REASON] = message
self._save_state(jobs.STATUS_CODE_FAILED, job, job.sequence_num,
job_context, data_source_context,
use_transaction=False)
def _build_data_source_context(self):
"""Set up context class specific to data source type we pull from."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
# TODO(mgainer): if we start getting timeout failures, perhaps learn
# proper chunk size from history, rather than using default.
default_chunk_size = data_source_class.get_default_chunk_size()
return context_class.build_blank_default({}, default_chunk_size)
def _build_job_context(self, upload_url, pii_secret):
"""Set up context object used to maintain this job's internal state."""
job_context = {
UPLOAD_URL: upload_url,
LAST_START_OFFSET: 0,
LAST_END_OFFSET: -1,
LAST_PAGE_SENT: -1,
LAST_PAGE_NUM_ITEMS: 0,
CONSECUTIVE_FAILURES: [],
FAILURE_REASON: '',
ITEMS_UPLOADED: 0,
PII_SECRET: pii_secret,
}
return job_context
def _load_state(self, job, sequence_num):
if job.sequence_num != sequence_num:
raise ValueError(
'Abandoning stale job with sequence %d; '
'there is a new job with sequence %d running.' % (
sequence_num, job.sequence_num))
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
content = transforms.loads(job.output)
job_context = content['job_context']
data_source_context_class = data_source_class.get_context_class()
data_source_context = data_source_context_class.build_from_dict(
content['data_source_context'])
return job_context, data_source_context
def _save_state(self, state, job, sequence_num, job_context,
data_source_context, use_transaction=True):
# Job context may have been made with blank values for these two items.
# Recover them from the previous context if they are not set (and if
# the previous context is present enough to have them)
try:
prev_job_context, _ = self._load_state(job, sequence_num)
if not job_context[PII_SECRET]:
job_context[PII_SECRET] = prev_job_context[PII_SECRET]
if not job_context[UPLOAD_URL]:
job_context[UPLOAD_URL] = prev_job_context[UPLOAD_URL]
except (ValueError, AttributeError):
pass
# Convert data source context object to plain dict.
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
context_class = data_source_class.get_context_class()
data_source_context_dict = context_class.save_to_dict(
data_source_context)
# Set job object state variables.
now = datetime.datetime.now()
job.output = transforms.dumps({
'job_context': job_context,
'data_source_context': data_source_context_dict,
})
job.status_code = state
job.execution_time_sec += int((now - job.updated_on).total_seconds())
job.updated_on = now
logging.info('Data pump job %s saving contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Using _update in DurableJobEntity
# pylint: disable=protected-access
if use_transaction:
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(
xg_on, jobs.DurableJobEntity._update, self._job_name,
sequence_num, job.status_code, job.output,
job.execution_time_sec)
else:
jobs.DurableJobEntity._update(self._job_name, sequence_num,
job.status_code, job.output,
job.execution_time_sec)
@classmethod
def _parse_pii_encryption_token(cls, token):
parts = token.split('/')
return (parts[0],
datetime.datetime(year=1970, month=1, day=1) +
datetime.timedelta(seconds=int(parts[1])))
@classmethod
def _is_pii_encryption_token_valid(cls, token):
try:
_, valid_until_date = cls._parse_pii_encryption_token(token)
return valid_until_date > datetime.datetime.now()
except ValueError:
return False
@classmethod
def _build_new_pii_encryption_token(cls, timedelta_string):
hmac_secret = base64.urlsafe_b64encode(
os.urandom(int(PII_SECRET_LENGTH * 0.75)))
table_lifetime_seconds = common_utils.parse_timedelta_string(
timedelta_string).total_seconds()
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
now = datetime.datetime.now()
table_lifetime_timedelta = datetime.timedelta(
seconds=table_lifetime_seconds)
valid_until_timestamp = int(
(now - unix_epoch + table_lifetime_timedelta).total_seconds())
pii_encryption_token = '%s/%d' % (hmac_secret,
valid_until_timestamp)
return pii_encryption_token
@classmethod
def _get_pii_token(cls, app_context):
"""Retrieve or generate and save a secret used to encrypt exported PII.
All PII data in objects exported to BigQuery is either suppressed
or transformed via a one-way hash using a secret value. The point
of the transformation is so that exported data cannot trivially be
correlated to any individual's data in CourseBuilder, but records
in exported data encoded using the same key can. (E.g., a user_id
is the key for students; this key should be usable to correlate a
user's language preference with his test scores.)
Once data has been exported from CourseBuilder to BigQuery, the
internal permissions from CourseBuilder no longer apply. To minimize
the ability of those with access to the data to perform long-term
correlations that might identify individuals, the secret used to
encode PII is automatically rotated on a period determined by the
course settings. We re-use the expiration period for tables, or
default to 30 days if no period is selected.
The format for the stored setting is a string composed of:
- A randomly-generated secret encoded as a base-64 string
- A slash character ('/')
- A Unix timestamp indicating the expiration date of the token.
The expiration date approach is chosen so that within the expiration
period, different data sources can be re-exported multiple times, but
still correlated with one another in BigQuery. Upon expiration, a
new token is generated and used. Data exported before and after the
changeover cannot be directly correlated. (It may be possible to
force a correlation if old versions of the data tables were downloaded
by comparing non-key fields in the old/new versions, if the non-key
fields are sufficiently discriminative)
Args:
app_context: Standard CB application context object.
Returns:
Secret string used for encoding PII data upon export.
"""
course_settings = app_context.get_environ()
pump_settings = course_settings.get(DATA_PUMP_SETTINGS_SCHEMA_SECTION,
{})
pii_encryption_token = pump_settings.get(PII_ENCRYPTION_TOKEN)
if (not pii_encryption_token or
not cls._is_pii_encryption_token_valid(pii_encryption_token)):
pii_encryption_token = cls._build_new_pii_encryption_token(
pump_settings.get(TABLE_LIFETIME,
PII_SECRET_DEFAULT_LIFETIME))
pump_settings[PII_ENCRYPTION_TOKEN] = pii_encryption_token
course = courses.Course(None, app_context=app_context)
course.save_settings(course_settings)
return pii_encryption_token
@classmethod
def _get_pii_secret(cls, app_context):
secret, _ = cls._parse_pii_encryption_token(
cls._get_pii_token(app_context))
return secret
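    # A sketch of how a value might be obscured with the secret above (the
    # actual transform applied at export time lives with the data sources,
    # not in this class; the HMAC/SHA-256 choice here is an assumption made
    # purely for illustration):
    #
    #   import hashlib, hmac
    #   def _hash_pii_value(value, secret):
    #       return hmac.new(secret, value, hashlib.sha256).hexdigest()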
def _get_bigquery_settings(self, app_context):
"""Pull settings necessary for using BigQuery from DB.
This is nice and verbose and paranoid, so that if there is any
misconfiguration, the end-user gets a nice message that's specific
about the particular problem, rather than just a KeyError or
ValueError.
Args:
app_context: The standard app context for the course in question.
Returns:
A namedtuple containing private_key, client_email, project_id
and dataset_id members. The first three are required to connect
to BigQuery, and the last is the dataset within BigQuery to
which the data pump will restrict itself for insert/write/delete
operations.
Raises:
ValueError: if any expected element is missing or malformed.
"""
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
dataset_id = (
pump_settings.get(DATASET_NAME) or
re.sub('[^a-z_:-]', '', app_context.get_slug().lower()) or
'course')
project_id = pump_settings.get(PROJECT_ID)
if not project_id:
raise ValueError('Cannot pump data without a course settings value '
'for the target Google BigQuery project ID')
json_key = pump_settings.get(JSON_KEY)
if not json_key:
raise ValueError('Cannot pump data without a JSON client key '
'allowing access to the target Google BigQuery '
'project')
try:
json_key = transforms.loads(json_key)
except ValueError:
raise ValueError('Cannot decode JSON client key for the target '
'Google BigQuery project.')
if 'private_key' not in json_key or 'client_email' not in json_key:
raise ValueError('The JSON client key for the target Google '
'BigQuery project does not seem to be well '
'formed; either the "private_key" or '
'"client_email" field is missing.')
table_lifetime_seconds = common_utils.parse_timedelta_string(
pump_settings.get(TABLE_LIFETIME, '')).total_seconds()
Settings = collections.namedtuple('Settings', [
'private_key', 'client_email', PROJECT_ID, 'dataset_id',
'table_lifetime_seconds'])
return Settings(json_key['private_key'], json_key['client_email'],
project_id, dataset_id, table_lifetime_seconds)
def _get_bigquery_service(self, bigquery_settings):
"""Get BigQuery API client plus HTTP client with auth credentials."""
credentials = oauth2client.client.SignedJwtAssertionCredentials(
bigquery_settings.client_email, bigquery_settings.private_key,
BIGQUERY_RW_SCOPE)
http = httplib2.Http()
http = credentials.authorize(http)
return apiclient.discovery.build(BIGQUERY_API_NAME,
BIGQUERY_API_VERSION, http=http), http
def _maybe_create_course_dataset(self, service, bigquery_settings):
"""Create dataset within BigQuery if it's not already there."""
datasets = service.datasets()
try:
datasets.get(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
datasets.insert(projectId=bigquery_settings.project_id,
body={
'datasetReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id
}}).execute()
def _maybe_delete_previous_table(self, tables, bigquery_settings):
"""Delete previous version of table for data source, if it exists."""
# TODO(mgainer): Make clobbering old table and replacing optional.
# For now, we assume people will be writing queries in terms of
# a single table name, and will be irritated at having to change
# their queries all the time if we add a timestamp to the table
# name. And no, AFAICT, the BigQuery API does not permit renaming
# of tables, just creation and deletion.
table_name = self._data_source_class_name.replace('DataSource', '')
try:
tables.delete(projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
tableId=table_name).execute()
except apiclient.errors.HttpError, ex:
if ex.resp.status != 404:
raise
def _json_schema_member_to_bigquery_schema(self, name, structure):
item = {'name': name}
if 'description' in structure:
item['description'] = structure['description']
if 'properties' in structure: # It's a sub-registry.
item['type'] = 'RECORD'
item['mode'] = 'NULLABLE'
item['fields'] = self._json_schema_to_bigquery_schema(
structure['properties'])
elif 'items' in structure: # It's an array
if 'items' in structure['items']:
raise ValueError(
'BigQuery schema descriptions do not support nesting '
'arrays directly in other arrays. Instead, nest '
'structures in arrays; those structures may contain '
'sub-arrays. Problem arises trying to pump data for %s' %
self._data_source_class_name)
item = self._json_schema_member_to_bigquery_schema(
name, structure['items'])
item['mode'] = 'REPEATED'
else:
item['mode'] = ('NULLABLE' if structure.get('optional')
else 'REQUIRED')
if structure['type'] in ('string', 'text', 'html', 'url', 'file'):
item['type'] = 'STRING'
            elif structure['type'] == 'integer':
                item['type'] = 'INTEGER'
            elif structure['type'] == 'number':
                item['type'] = 'FLOAT'
            elif structure['type'] == 'boolean':
                item['type'] = 'BOOLEAN'
elif structure['type'] in ('date', 'datetime'):
item['type'] = 'TIMESTAMP'
else:
raise ValueError(
'Unrecognized schema scalar type "%s" '
'when trying to make schema for data-pumping %s' % (
structure['type'], self._data_source_class_name))
return item
def _json_schema_to_bigquery_schema(self, json_schema_dict):
fields = []
for name, structure in json_schema_dict.iteritems():
fields.append(self._json_schema_member_to_bigquery_schema(
name, structure))
return fields
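    # For illustration (field names below are made up), a JSON schema fragment
    # such as
    #   {'user_id': {'type': 'string'},
    #    'scores': {'items': {'type': 'number'}}}
    # is converted by the method above into BigQuery fields equivalent to
    #   [{'name': 'user_id', 'mode': 'REQUIRED', 'type': 'STRING'},
    #    {'name': 'scores', 'mode': 'REPEATED', 'type': 'FLOAT'}]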
def _create_data_table(self, tables, bigquery_settings, schema):
"""Instantiate and provide schema for new BigQuery table."""
table_name = self._data_source_class_name.replace('DataSource', '')
request = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'schema': {'fields': schema}
}
# If user has requested it, set the time at which table should be
# reclaimed (as milliseconds since Unix epoch).
if bigquery_settings.table_lifetime_seconds:
now = datetime.datetime.now()
expiration_delta = datetime.timedelta(
seconds=bigquery_settings.table_lifetime_seconds)
unix_epoch = datetime.datetime(year=1970, month=1, day=1)
expiration_ms = int(
(now + expiration_delta - unix_epoch).total_seconds()) * 1000
request['expirationTime'] = expiration_ms
# Allow exceptions from here to propagate; we don't expect any problems,
# so if we have any, the upload should abort.
tables.insert(
projectId=bigquery_settings.project_id,
datasetId=bigquery_settings.dataset_id,
body=request).execute()
def _create_upload_job(self, http, bigquery_settings):
"""Before uploading, we must create a job to handle the upload.
Args:
http: An HTTP client object configured to send our auth token
bigquery_settings: Configs for talking to bigquery.
Returns:
URL specific to this upload job. Subsequent PUT requests to send
pages of data must be sent to this URL.
Raises:
Exception: on unexpected responses from BigQuery API.
"""
uri = '%s%s/jobs?uploadType=resumable' % (
BIGQUERY_API_UPLOAD_URL_PREFIX, bigquery_settings.project_id)
headers = {
'Content-Type': 'application/json',
'X-Upload-Content-Type': 'application/octet-stream',
}
table_name = self._data_source_class_name.replace('DataSource', '')
body = transforms.dumps({
'kind': 'bigquery#job',
'configuration': {
'load': {
'createDisposition': 'CREATE_NEVER', # Already exists.
'destinationTable': {
'projectId': bigquery_settings.project_id,
'datasetId': bigquery_settings.dataset_id,
'tableId': table_name,
},
'ignoreUnknownValues': False,
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
}
}
})
response, content = http.request(uri, method='POST',
body=body, headers=headers)
if int(response.get('status', 0)) != 200:
raise Exception('Got non-200 response when trying to create a '
                            'new upload job. Response was: "%s"; content '
'was "%s"' % (str(response), str(content)))
location = response.get('location')
if not location:
raise Exception('Expected response to contain a "location" item '
'giving a URL to send subsequent content to, but '
'instead got "%s"' % str(response))
return location
def _initiate_upload_job(self, bigquery_service, bigquery_settings, http,
app_context):
"""Coordinate table cleanup, setup, and initiation of upload job."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
table_schema = data_source_class.get_schema(app_context, catch_and_log_)
schema = self._json_schema_to_bigquery_schema(table_schema)
tables = bigquery_service.tables()
self._maybe_create_course_dataset(bigquery_service, bigquery_settings)
self._maybe_delete_previous_table(tables, bigquery_settings)
self._create_data_table(tables, bigquery_settings, schema)
upload_url = self._create_upload_job(http, bigquery_settings)
return upload_url
def _note_retryable_failure(self, message, job_context):
"""Log a timestamped message into the job context object."""
timestamp = datetime.datetime.now().strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
job_context[CONSECUTIVE_FAILURES].append(timestamp + ' ' + message)
def _randomized_backoff_timeout(self, job_context):
num_failures = len(job_context[CONSECUTIVE_FAILURES])
if not num_failures:
return 0
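        # Illustrative values (not in the original): one consecutive failure
        # draws the delay from [2, 4) seconds, three failures from [8, 16)
        # seconds, and so on, capped at MAX_RETRY_BACKOFF_SECONDS.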
return min(MAX_RETRY_BACKOFF_SECONDS,
random.randrange(2 ** num_failures, 2 ** (num_failures + 1)))
def _check_upload_state(self, http, job_context):
"""Check with the BigQuery upload server to get state of our upload.
Due to various communication failure cases, we may not be aware of
the actual state of the upload as known to the server. Issue a blank
PUT request to evoke a response that will indicate:
- How far along we are in the upload
- Whether the upload has already completed
- Whether the upload job has taken too long and expired
Args:
http: An HTTP client object configured to send our auth token
job_context: Hash containing configuration for this upload job.
Returns:
A 2-tuple of next page to load (or None if no page should be
loaded), and the next jobs.STATUS_CODE_<X> to transition to.
"""
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
headers={'Content-Range': 'bytes */*'})
return self._handle_put_response(response, job_context, is_upload=False)
def _send_data_page_to_bigquery(self, data, is_last_chunk, next_page,
http, job, sequence_num, job_context,
data_source_context):
        # BigQuery expects one JSON object per newline-delimited record,
# not a JSON array containing objects, so convert them individually.
# Less efficient, but less hacky than converting and then string
# manipulation.
lines = []
total_len = 0
for item in data:
line = transforms.dumps(item)
line += '\n'
total_len += len(line)
lines.append(line)
# Round data size up to next multiple of 256K, per
# https://cloud.google.com/bigquery/loading-data-post-request#chunking
padding_amount = 0
if not is_last_chunk:
round_to = 256 * 1024
if total_len % round_to:
padding_amount = round_to - (total_len % round_to)
lines.append(' ' * padding_amount)
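                # Illustrative numbers (not in the original): if total_len is
                # 300000, then round_to is 262144, padding_amount is
                # 524288 - 300000 = 224288, and the padded chunk ends exactly
                # on the next 256 KiB boundary.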
payload = ''.join(lines)
# We are either re-attempting to send a page, or sending a new page.
# Adjust the job_context's last-sent state to reflect this.
job_context[LAST_PAGE_NUM_ITEMS] = len(data)
if next_page == job_context[LAST_PAGE_SENT]:
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
elif next_page == job_context[LAST_PAGE_SENT] + 1:
job_context[LAST_PAGE_SENT] = next_page
job_context[LAST_START_OFFSET] = (
job_context[LAST_END_OFFSET] + 1)
job_context[LAST_END_OFFSET] = (
job_context[LAST_START_OFFSET] + len(payload) - 1)
else:
raise Exception(
'Internal error - unexpected condition in sending page. '
'next_page=%d last_page=%d, num_items=%d' % (
next_page, job_context[LAST_PAGE_SENT], len(data)))
logging.info(
'Sending to BigQuery. %d items; %d padding bytes; is-last: %s',
len(data), padding_amount, str(is_last_chunk))
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
(job_context[LAST_END_OFFSET] + 1) if is_last_chunk else '*')
}
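        # Illustrative header values (not in the original): a first, non-final
        # chunk of 524288 bytes yields 'Content-Range: bytes 0-524287/*';
        # a final chunk ending at byte offset 600000 yields
        # 'Content-Range: bytes 524288-600000/600001'.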
response, _ = http.request(job_context[UPLOAD_URL], method='PUT',
body=payload, headers=headers)
_, next_state = self._handle_put_response(response, job_context,
is_upload=True)
return next_state
def _handle_put_response(self, response, job_context, is_upload=True):
"""Update job_context state depending on response from BigQuery."""
status = int(response['status'])
logging.info('Response from bigquery: %d; %s', status, str(response))
next_page = None
next_status = jobs.STATUS_CODE_STARTED
if status == 308:
# Google's push-partial-data usurps the usual meaning of 308 to
# instead mean "partial request incomplete"; here, it's telling
# us that the request has partially completed, and it will give
# us a Range: header to indicate how far it thinks we've gone.
# We only care about the upper end of the range.
if 'range' not in response:
last_offset_received = -1
else:
last_offset_received = int(response['range'].split('-')[1])
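                # Illustrative value (not in the original): a response header
                # of 'range': 'bytes=0-524287' parses to
                # last_offset_received == 524287; a 308 without a Range header
                # is treated above as no bytes having been received (-1).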
if last_offset_received == job_context[LAST_END_OFFSET]:
# The nominal case; the reported index of the last byte
# received exactly matches what we think we sent. Tell our
# caller we are ready to try the next page, and count up
# the total number of items sent only now that we have seen
# the receiving side's acknowledgement.
next_page = job_context[LAST_PAGE_SENT] + 1
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
# Don't clear the list of failures if this is handling the
# pre-check done before uploading. Experiments show that
# persistent problems with our requests result in 503's on
# upload, but 308's (reporting no progress made) on check.
# We want to eventually fail out if we're constantly getting
# errors, so ignore the "success" on checking status.
if is_upload:
job_context[CONSECUTIVE_FAILURES] = []
elif (last_offset_received >= job_context[LAST_START_OFFSET] - 1 and
last_offset_received < job_context[LAST_END_OFFSET]):
# If the last offset received is not the same as the last offset
# sent, that's possibly OK; verify that the last offset received
# is sane. Here, "sane" means that we accept seeing the
# last offset of the previous page sent (last_start_offset-1)
# up to, but not including the last_end_offset (for the page
# we just sent). Anything lower means that our algorithm
# mistakenly skipped past a failure. Anything higher means
# that we have somehow become confused and decided to step
# backward (or BigQuery is lying to us).
prev_page_size = (job_context[LAST_END_OFFSET] -
job_context[LAST_START_OFFSET] + 1)
bytes_received = (last_offset_received -
job_context[LAST_START_OFFSET] + 1)
self._note_retryable_failure(
'Incomplete upload detected - %d of %d bytes received '
'for page %d' %
(bytes_received, prev_page_size,
job_context[LAST_PAGE_SENT]), job_context)
next_page = job_context[LAST_PAGE_SENT]
else:
raise ValueError(
'Uploaded byte count of %d does not fall in the range '
'%d to %d, the start/end range for previously-sent page '
'number %d. Abandoning upload.' % (
last_offset_received, job_context[LAST_START_OFFSET],
job_context[LAST_END_OFFSET],
job_context[LAST_PAGE_SENT]))
elif status in (200, 201):
# BigQuery confirms that it has seen the upload complete. (Note
# that this is *not* a promise that the upload has parsed
# correctly; there doesn't seem to be a clean way to ask about
# that other than to probe the table for number of rows uploaded
# until we see the desired number or time out. Ick.)
job_context[ITEMS_UPLOADED] += job_context[LAST_PAGE_NUM_ITEMS]
job_context[LAST_PAGE_NUM_ITEMS] = 0
next_status = jobs.STATUS_CODE_COMPLETED
elif status == 404:
# Unlikely, but possible. For whatever reason, BigQuery has
# decided that our upload URL is no longer valid. (Docs say that
# we are allowed up to a day to get an upload done, but do not
# promise that this is the only reason a job may become invalid.)
# We need to start again from scratch. To start over, we will
# just skip uploading a data page this round, and set ourselves up
# to be called back again from the deferred-tasks queue. When the
# callback happens, STATUS_CODE_QUEUED will indicate we need to
# re-init everything from scratch.
next_status = jobs.STATUS_CODE_QUEUED
elif status in (500, 502, 503, 504):
# Server Error, Bad Gateway, Service Unavailable or Gateway Timeout.
# In all of these cases, we do a randomized exponential delay before
# retrying.
self._note_retryable_failure('Retryable server error %d' % status,
job_context)
else:
raise ValueError(
'Got unexpected status code %d from BigQuery in response %s' %
(status, str(response)))
return next_page, next_status
def _fetch_page_data(self, app_context, data_source_context, next_page):
"""Get the next page of data from the data source."""
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
catch_and_log_ = catch_and_log.CatchAndLog()
is_last_page = False
with catch_and_log_.propagate_exceptions('Loading page of data'):
schema = data_source_class.get_schema(app_context, catch_and_log_)
required_jobs = data_sources.utils.get_required_jobs(
data_source_class, app_context, catch_and_log_)
data, _ = data_source_class.fetch_values(
app_context, data_source_context, schema, catch_and_log_,
next_page, *required_jobs)
if (data_source_class.get_default_chunk_size() == 0 or
not hasattr(data_source_context, 'chunk_size') or
len(data) < data_source_context.chunk_size):
is_last_page = True
else:
# Here, we may have read to the end of the table and just
# happened to end up on an even chunk boundary. Attempt to
# read one more row so that we can discern whether we really
# are at the end.
# Don't use the normal data_source_context; we don't want it
# to cache a cursor for the next page that will only retrieve
# one row.
throwaway_context = copy.deepcopy(data_source_context)
throwaway_context.chunk_size = 1
next_data, actual_page = data_source_class.fetch_values(
app_context, throwaway_context, schema, catch_and_log_,
next_page + 1, *required_jobs)
if not next_data or actual_page == next_page:
is_last_page = True
return data, is_last_page
def _send_next_page(self, sequence_num, job):
"""Coordinate table setup, job setup, sending pages of data."""
# Gather necessary resources
app_context = sites.get_course_index().get_app_context_for_namespace(
self._namespace)
pii_secret = self._get_pii_secret(app_context)
bigquery_settings = self._get_bigquery_settings(app_context)
bigquery_service, http = self._get_bigquery_service(bigquery_settings)
# If this is our first call after job start (or we have determined
# that we need to start over from scratch), do initial setup.
# Otherwise, re-load context objects from saved version in job.output
if job.status_code == jobs.STATUS_CODE_QUEUED:
upload_url = self._initiate_upload_job(
bigquery_service, bigquery_settings, http, app_context)
job_context = self._build_job_context(upload_url, pii_secret)
data_source_context = self._build_data_source_context()
else:
job_context, data_source_context = self._load_state(
job, sequence_num)
if hasattr(data_source_context, 'pii_secret'):
data_source_context.pii_secret = pii_secret
logging.info('Data pump job %s loaded contexts: %s %s',
self._job_name, str(job_context), str(data_source_context))
# Check BigQuery's state. Based on that, choose the next page of data
# to push. Depending on BigQuery's response, we may or may not be
# able to send a page now.
next_page, next_state = self._check_upload_state(http, job_context)
if next_page is not None:
data, is_last_chunk = self._fetch_page_data(
app_context, data_source_context, next_page)
next_state = self._send_data_page_to_bigquery(
data, is_last_chunk, next_page,
http, job, sequence_num, job_context, data_source_context)
self._save_state(next_state, job, sequence_num, job_context,
data_source_context)
# If we are not done, enqueue another to-do item on the deferred queue.
if len(job_context[CONSECUTIVE_FAILURES]) >= MAX_CONSECUTIVE_FAILURES:
raise Exception('Too many consecutive failures; abandoning job.')
elif not job.has_finished:
backoff_seconds = self._randomized_backoff_timeout(job_context)
logging.info('%s re-queueing for subsequent work', self._job_name)
deferred.defer(self.main, sequence_num, _countdown=backoff_seconds)
else:
logging.info('%s complete', self._job_name)
def main(self, sequence_num):
"""Callback entry point. Manage namespaces, failures; send data."""
logging.info('%s de-queued and starting work.', self._job_name)
job = self.load()
if not job:
raise deferred.PermanentTaskFailure(
'Job object for %s not found!' % self._job_name)
if job.has_finished:
return # We have been canceled; bail out immediately.
with common_utils.Namespace(self._namespace):
try:
self._send_next_page(sequence_num, job)
except Exception, ex:
try:
# Log origin of exception to permit troubleshooting.
# Do this in try/finally block to conform to Python docs'
# recommendation to avoid circular reference to traceback
# object.
origin_traceback = sys.exc_info()[2]
logging.critical('%s: job abandoned due to fatal error %s',
self._job_name, str(ex))
logging.critical(''.join(
traceback.format_tb(origin_traceback)))
                finally:
                    # Drop the traceback reference explicitly to break the
                    # circular reference mentioned above.
                    del origin_traceback
# Log failure in job object as well.
if job.output:
job_context, data_source_context = self._load_state(
job, sequence_num)
else:
job_context = self._build_job_context(None, None)
data_source_context = (self._build_data_source_context())
job_context[FAILURE_REASON] = str(ex)
self._save_state(jobs.STATUS_CODE_FAILED, job, sequence_num,
job_context, data_source_context)
# PermanentTaskFailure tells deferred queue to give up on us.
raise deferred.PermanentTaskFailure('Job %s failed: %s' % (
self._job_name, str(ex)))
def get_display_dict(self, app_context):
"""Set up dict for Jinja rendering on data_pump.html."""
ret = {
'name': self._data_source_class_name,
'status': 'Has Never Run',
'active': False,
}
job = self.load()
if job:
ret['status'] = jobs.STATUS_CODE_DESCRIPTION[job.status_code]
ret['active'] = not job.has_finished
ret['sequence_number'] = job.sequence_num
ret['updated_on'] = job.updated_on.strftime(
utils.HUMAN_READABLE_TIME_FORMAT)
if job.has_finished:
duration = job.execution_time_sec
else:
                duration = int((datetime.datetime.now() -
                                job.updated_on).total_seconds())
ret['duration'] = datetime.timedelta(days=0, seconds=duration)
ret['last_updated'] = job.updated_on.strftime(
utils.HUMAN_READABLE_DATETIME_FORMAT)
bigquery_settings = self._get_bigquery_settings(app_context)
ret['bigquery_url'] = '%s%s:%s.%s' % (
BIGQUERY_UI_URL_PREFIX, bigquery_settings.project_id,
bigquery_settings.dataset_id,
self._data_source_class_name.replace('DataSource', ''))
try:
job_context, _ = self._load_state(job, job.sequence_num)
ret['job_context'] = job_context
current_secret = DataPumpJob._get_pii_secret(app_context)
if job_context[PII_SECRET] != current_secret:
ret['pii_secret_is_out_of_date'] = True
del job_context[PII_SECRET]
except (ValueError, AttributeError):
# When jobs framework catches a failure, it overwrites the
# job.output with the failure message as a string. We will
# get here if we fail to parse job.output as a JSON-packed
# object.
ret['message'] = job.output
data_source_class = _get_data_source_class_by_name(
self._data_source_class_name)
ret['source_url'] = '%s/rest/data/%s/items?chunk_size=10' % (
app_context.get_slug(), data_source_class.get_name())
catch_and_log_ = catch_and_log.CatchAndLog()
ret['schema'] = data_source_class.get_schema(app_context,
catch_and_log_)
ret['generator_statuses'] = []
ret['available'] = True
ret['any_generator_running'] = False
required_generators = data_source_class.required_generators()
if not required_generators:
ret['generator_statuses'].append('(No dependencies)')
ret['has_any_generators'] = False
else:
ret['has_any_generators'] = True
for generator_class in required_generators:
generator = generator_class(app_context)
job = generator.load()
ret['generator_statuses'].append(
analytics.display.get_generator_status_message(
generator_class, job))
if not job or job.status_code != jobs.STATUS_CODE_COMPLETED:
ret['available'] = False
if job and not job.has_finished:
ret['any_generator_running'] = True
return ret
class DataPumpJobsDataSource(data_sources.SynchronousQuery):
"""Present DataPump job status as an analytic generated at page-render time.
This is a very mild hack. Since the data pump job controls show up as a
sub-tab under Dashboard -> Analytics, the easiest way to generate tab
content is to act as though we are an analytic. And we are, in a sense -
this analytic just happens to generate a table of data-pump job statuses,
rather than analytics about student performance. This also conveniently
re-uses all the mechanics for authorization, dispatch, page-painting, etc.
"""
@staticmethod
def required_generators():
return []
@staticmethod
def fill_values(app_context, template_values):
template_values['xsrf_token'] = (
crypto.XsrfTokenManager.create_xsrf_token(XSRF_ACTION_NAME))
source_classes = [
ds for ds in data_sources.Registry.get_rest_data_source_classes()
if ds.exportable()]
source_classes.sort(key=lambda c: c.__name__)
# pylint: disable=protected-access
template_values['pumps'] = []
for source_class in source_classes:
job = DataPumpJob(app_context, source_class.__name__)
template_values['pumps'].append(job.get_display_dict(app_context))
pump_settings = app_context.get_environ().get(
DATA_PUMP_SETTINGS_SCHEMA_SECTION, {})
template_values['need_settings'] = (
            PROJECT_ID not in pump_settings or
            JSON_KEY not in pump_settings)
template_values[DATASET_NAME] = pump_settings.get(DATASET_NAME)
custom_module = None
class DashboardExtension(object):
"""Respond to UI run/cancel commands for individual data pump jobs."""
@classmethod
def register(cls):
# Register new permission for pushing student data to external location.
dashboard.DashboardHandler.add_external_permission(
ACCESS_PERMISSION, ACCESS_PERMISSION_DESCRIPTION)
# Register a new Analytics sub-tab for showing data pump status and
# start/stop buttons.
data_pump_visualization = analytics.Visualization(
'data_pumps', 'Data Pumps', 'data_pump.html',
data_source_classes=[DataPumpJobsDataSource])
tabs.Registry.register('analytics', 'data_pump', 'Data Pump',
[data_pump_visualization])
def post_action(handler):
cls(handler).post_data_pump()
dashboard.DashboardHandler.post_actions.append(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION,
post_action)
dashboard.DashboardHandler.map_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
@classmethod
def unregister(cls):
dashboard.DashboardHandler.post_actions.remove(DASHBOARD_ACTION)
setattr(dashboard.DashboardHandler, 'post_%s' % DASHBOARD_ACTION, None)
dashboard.DashboardHandler.unmap_action_to_permission(
'post_%s' % DASHBOARD_ACTION, ACCESS_PERMISSION)
dashboard.DashboardHandler.remove_external_permission(ACCESS_PERMISSION)
roles.Roles.unregister_permissions(custom_module)
def post_data_pump(self):
source_name = self.handler.request.get('data_source')
data_source_class = _get_data_source_class_by_name(source_name)
if data_source_class:
data_pump_job = DataPumpJob(self.handler.app_context, source_name)
action = self.handler.request.get('pump_action')
if action == 'start_pump':
data_pump_job.submit()
elif action == 'cancel_pump':
data_pump_job.cancel()
elif action == 'run_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).submit()
elif action == 'cancel_generators':
for generator_class in data_source_class.required_generators():
generator_class(self.handler.app_context).cancel()
self.handler.redirect(self.handler.get_action_url(
'analytics', extra_args={'tab': 'data_pump'}, fragment=source_name))
def __init__(self, handler):
self.handler = handler
def register_module():
"""Adds this module to the registry. Called once at startup."""
project_id = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PROJECT_ID,
'Project ID', 'string',
description='The ID (not the name!) of the Project to which to '
'send data. See the list of projects and their IDs at '
'https://console.developers.google.com/project',
i18n=False)
dataset_name = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + DATASET_NAME,
'Dataset Name', 'string',
description='Name of the BigQuery dataset to which to pump tables. '
'If not set, this will default to the name of the course.',
optional=True, i18n=False)
json_key = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + JSON_KEY,
'JSON Key', 'text',
i18n=False,
description='Contents of a JSON key created in the Developers Console '
'for the instance where BigQuery is to be run. See '
# TODO(mgainer): Get CB location of instructions to get client key
# for destination application.
'the instructions at ')
table_lifetime = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + TABLE_LIFETIME,
'Table Lifetime', 'string',
optional=True, i18n=False,
description='Amount of time a table pushed to BigQuery will last. '
'After this amount of time, the table will be automatically deleted. '
'(This is useful if your data retention or privacy policy mandates '
'a limited time for analysis after which personal data must be '
'removed.) Leaving this field blank or setting it to zero will '
'cause BigQuery to indefinitely retain data. Supported units are: '
'"weeks", "days", "hours", "minutes", "seconds". Units may be '
'specified as their first letter, singular, or plural. Spaces '
'and commas may be used or omitted. E.g., both of the following '
'are equivalent: "3w1d7h", "3 weeks, 1 day, 7 hours"')
pii_encryption_token = schema_fields.SchemaField(
DATA_PUMP_SETTINGS_SCHEMA_SECTION + ':' + PII_ENCRYPTION_TOKEN,
'PII Encryption Token', 'string',
optional=True, i18n=False, editable=False,
description='Automatically generated encryption secret used to '
'obscure PII fields when these are pushed to BigQuery. This '
'key lasts only as long as the Table Lifetime setting above, or '
'30 days if the limit is not set. After this secret has expired, '
'a new secret will be generated. PII items with the same un-obscured '
'value which are obscured with different values for this secret will '
'have different values. Most importantly, this means that joins on '
'fields that should be the same (e.g., user ID) will not work.')
course_settings_fields = (
lambda c: project_id,
lambda c: json_key,
lambda c: dataset_name,
lambda c: table_lifetime,
lambda c: pii_encryption_token,
)
def on_module_enabled():
data_sources.Registry.register(DataPumpJobsDataSource)
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION] += course_settings_fields
tabs.Registry.register('settings', 'data_pump', 'Data Pump',
DATA_PUMP_SETTINGS_SCHEMA_SECTION)
DashboardExtension.register()
def on_module_disabled():
for field in course_settings_fields:
courses.Course.OPTIONS_SCHEMA_PROVIDERS[
DATA_PUMP_SETTINGS_SCHEMA_SECTION].remove(field)
DashboardExtension.unregister()
global custom_module
custom_module = custom_modules.Module(
'Data Pump', 'Pushes DB and generated content to a BigQuery project',
[], [],
notify_module_enabled=on_module_enabled,
notify_module_disabled=on_module_disabled)
return custom_module
|
{
"content_hash": "7f2fd946818aae8bd50c4b3b3f94852a",
"timestamp": "",
"source": "github",
"line_count": 1145,
"max_line_length": 80,
"avg_line_length": 47.0235807860262,
"alnum_prop": 0.6009063556331489,
"repo_name": "wavemind/gcb17ml",
"id": "8c253a79dc8d6f78c09fbc54dc6a36b80d51d498",
"size": "54440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/data_pump/data_pump.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62209"
},
{
"name": "JavaScript",
"bytes": "425162"
},
{
"name": "Python",
"bytes": "3344249"
},
{
"name": "Shell",
"bytes": "23773"
}
],
"symlink_target": ""
}
|
import json
import xml.dom.minidom
from urllib import urlencode
from urllib2 import urlopen
from xml.parsers.expat import ExpatError
#get_encoding, decode_page, check_status, and geocode are largely
#from the GoogleV3 module in the geopy library
#Per the MIT license, we have included the following from geopy
#Copyright (c) 2006-2010 Brian Beck
#Copyright (c) 2010-2012 GeoPy Project and individual contributors.
#All rights reserved.
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
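# The three exception classes below are minimal stand-ins added so that this
# module is self-contained; treat them as an assumption rather than geopy's
# own richer error classes, which the original code relied on.
class GeocoderResultError(Exception):
    """Base class for errors reported by the geocoding service."""
class GQueryError(GeocoderResultError):
    """The query was malformed, denied, or returned no usable results."""
class GTooManyQueriesError(GeocoderResultError):
    """The request quota for the given key or IP has been exceeded."""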
def get_encoding(page, contents=None):
charset = page.headers.getparam("charset") or None
if charset:
return charset
if contents:
try:
return xml.dom.minidom.parseString(contents).encoding
except ExpatError:
pass
def decode_page(page):
contents = page.read()
# HTTP 1.1 defines iso-8859-1 as the 'implied' encoding if none is given
encoding = get_encoding(page, contents) or 'iso-8859-1'
return unicode(contents, encoding=encoding).encode('utf-8')
def check_status(status):
'''Validates error statuses.'''
if status == 'ZERO_RESULTS':
raise GQueryError(
'The geocode was successful but returned no results. This may'
' occur if the geocode was passed a non-existent address or a'
' latlng in a remote location.')
elif status == 'OVER_QUERY_LIMIT':
raise GTooManyQueriesError(
'The given key has gone over the requests limit in the 24'
' hour period or has submitted too many requests in too'
' short a period of time.')
elif status == 'REQUEST_DENIED':
raise GQueryError(
'Your request was denied, probably because of lack of a'
' sensor parameter.')
elif status == 'INVALID_REQUEST':
raise GQueryError('Probably missing address or latlng.')
else:
        raise GeocoderResultError('Unknown error.')
def geocode(string):
if isinstance(string, unicode):
string = string.encode('utf-8')
params = {
'address': string,
'sensor': str(False).lower()
}
page = urlopen('http://maps.googleapis.com/maps/api/geocode/json?%(params)s' % ({'params': urlencode(params)}))
if not isinstance(page, basestring):
page = decode_page(page)
doc = json.loads(page)
places = doc.get('results', [])
if not places:
check_status(doc.get('status'))
elif len(places) != 1:
raise ValueError(
"Didn't find exactly one placemark! (Found %d)" % len(places))
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted_address')
latitude = place['geometry']['location']['lat']
longitude = place['geometry']['location']['lng']
return (location, (latitude, longitude))
return parse_place(places[0])
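# Illustrative usage (not part of the original module; the address and the
# resulting coordinates are made-up examples):
#   location, (lat, lng) = geocode('1600 Amphitheatre Parkway, Mountain View')
#   # location is the formatted address string; lat and lng are floats.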
|
{
"content_hash": "f5f120005d33a425258b1a5f264d4e07",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 115,
"avg_line_length": 37.6764705882353,
"alnum_prop": 0.677855841790268,
"repo_name": "daeyun/campus-path",
"id": "2d65badb3611c8282cc9708a67b6face8d94f191",
"size": "3843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5256"
},
{
"name": "JavaScript",
"bytes": "68323"
},
{
"name": "Python",
"bytes": "69425"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('municipal_finance', '0009_auto_20170301_1230'),
]
operations = [
migrations.AlterField(
model_name='agedcreditorfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='agedcreditorfacts',
name='item_code',
field=models.ForeignKey(db_column='item_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.AgedCreditorItems'),
),
migrations.AlterField(
model_name='agedcreditorfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='agedcreditoritems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='ageddebtorfacts',
name='customer_group_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='ageddebtorfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='ageddebtorfacts',
name='item_code',
field=models.ForeignKey(db_column='item_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.AgedDebtorItems'),
),
migrations.AlterField(
model_name='ageddebtorfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='ageddebtoritems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='amounttype',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='bsheetfacts',
name='item_code',
field=models.ForeignKey(db_column='item_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.BsheetItems'),
),
migrations.AlterField(
model_name='bsheetitems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='capitalfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='capitalfacts',
name='function_code',
field=models.ForeignKey(db_column='function_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.GovernmentFunctions'),
),
migrations.AlterField(
model_name='capitalfacts',
name='item_code',
field=models.ForeignKey(db_column='item_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.CapitalItems'),
),
migrations.AlterField(
model_name='capitalfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='capitalitems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='cflowfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='cflowfacts',
name='item_code',
field=models.ForeignKey(db_column='item_code', on_delete=django.db.models.deletion.DO_NOTHING, to='municipal_finance.CflowItems'),
),
migrations.AlterField(
model_name='cflowfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='cflowitems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='conditionalgrantsfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='conditionalgrantsfacts',
name='grant_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='conditionalgrantsfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='governmentfunctions',
name='category_label',
field=models.TextField(),
),
migrations.AlterField(
model_name='governmentfunctions',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='governmentfunctions',
name='subcategory_label',
field=models.TextField(),
),
migrations.AlterField(
model_name='incexpfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='incexpfacts',
name='function_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='incexpfacts',
name='item_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='incexpfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='incexpitems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='municipalitystaffcontacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='municipalitystaffcontacts',
name='role',
field=models.TextField(),
),
migrations.AlterField(
model_name='repmaintfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='repmaintfacts',
name='item_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='repmaintfacts',
name='period_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='repmaintitems',
name='label',
field=models.TextField(),
),
migrations.AlterField(
model_name='uifwexpfacts',
name='demarcation_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='uifwexpfacts',
name='financial_year',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='uifwexpfacts',
name='item_code',
field=models.TextField(),
),
migrations.AlterField(
model_name='uifwexpfacts',
name='item_label',
field=models.TextField(),
),
]
|
{
"content_hash": "ee37f16e89384267fa66fe767d34eead",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 155,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.5389945652173913,
"repo_name": "Code4SA/municipal-data",
"id": "277ade9de4a7fc6d417197fd5635cd98c2bcf5cf",
"size": "7432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "municipal_finance/migrations/0010_auto_20170301_1256.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "172224"
},
{
"name": "HTML",
"bytes": "166174"
},
{
"name": "JavaScript",
"bytes": "466942"
},
{
"name": "PLpgSQL",
"bytes": "23056"
},
{
"name": "Python",
"bytes": "215064"
},
{
"name": "Shell",
"bytes": "905"
}
],
"symlink_target": ""
}
|
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class WlbItemAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.color = None
self.goods_cat = None
self.height = None
self.is_dangerous = None
self.is_friable = None
self.is_sku = None
self.item_code = None
self.length = None
self.name = None
self.package_material = None
self.price = None
self.pricing_cat = None
self.pro_name_list = None
self.pro_value_list = None
self.remark = None
self.support_batch = None
self.title = None
self.type = None
self.volume = None
self.weight = None
self.width = None
def getapiname(self):
return 'taobao.wlb.item.add'
|
{
"content_hash": "c464f1a6fd43809eb310ed80c8b3ac3b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 55,
"avg_line_length": 25.29032258064516,
"alnum_prop": 0.6581632653061225,
"repo_name": "CooperLuan/devops.notes",
"id": "9cecefa74c33d55d9d6851b491af9e3ee7b66bf7",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/WlbItemAddRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
}
|
"""Setup script for oauth2client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
import setup_utils
has_setuptools = False
try:
from setuptools import setup
has_setuptools = True
except ImportError:
from distutils.core import setup
packages = [
'oauth2client',
]
install_requires = []
py_modules = []
# (module to test for, install_requires to add if missing, packages to add if missing, py_modules to add if missing)
REQUIREMENTS = [
('httplib2', 'httplib2', 'httplib2', None),
('gflags', 'python-gflags', None, ['gflags', 'gflags_validators']),
(['json', 'simplejson', 'django.utils'], 'simplejson', 'simplejson', None)
]
for import_name, requires, package, modules in REQUIREMENTS:
if setup_utils.is_missing(import_name):
if has_setuptools:
install_requires.append(requires)
else:
if package is not None:
packages.append(package)
else:
py_modules.extend(modules)
long_desc = """The oauth2client is a client library for OAuth 2.0."""
setup(name="oauth2client",
version="1.0beta2",
description="OAuth 2.0 client library",
long_description=long_desc,
author="Joe Gregorio",
author_email="jcgregorio@google.com",
url="http://code.google.com/p/google-api-python-client/",
install_requires=install_requires,
packages=packages,
py_modules=py_modules,
license="Apache 2.0",
keywords="google oauth 2.0 http client",
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP'])
|
{
"content_hash": "60455bf80b02a931dfc38fc5dfbc4390",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 116,
"avg_line_length": 30.271186440677965,
"alnum_prop": 0.6556550951847704,
"repo_name": "MapofLife/MOL",
"id": "ec87873d325a375900d5c0a7ae71aa91c3b2c1ec",
"size": "2367",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "earthengine/google-api-python-client/setup_oauth2client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83354"
},
{
"name": "CSS",
"bytes": "245523"
},
{
"name": "JavaScript",
"bytes": "1302309"
},
{
"name": "PHP",
"bytes": "613"
},
{
"name": "Perl",
"bytes": "2100"
},
{
"name": "Python",
"bytes": "1953387"
},
{
"name": "R",
"bytes": "52"
},
{
"name": "SQL",
"bytes": "21299"
},
{
"name": "Shell",
"bytes": "3146"
}
],
"symlink_target": ""
}
|
import os
import argparse
import inspect
import sys
from nipype.interfaces.base import Interface
from nipype.utils.misc import str2bool
def listClasses(module=None):
if module:
__import__(module)
pkg = sys.modules[module]
print "Available Interfaces:"
for k,v in pkg.__dict__.items():
if inspect.isclass(v) and issubclass(v, Interface):
print "\t%s"%k
def add_options(parser=None, module=None, function=None):
interface = None
if parser and module and function:
__import__(module)
interface = getattr(sys.modules[module],function)()
inputs = interface.input_spec()
for name, spec in sorted(interface.inputs.traits(transient=None).items()):
desc = "\n".join(interface._get_trait_desc(inputs, name, spec))[len(name)+2:]
if hasattr(spec, "mandatory") and spec.mandatory:
parser.add_argument(name, help=desc)
else:
parser.add_argument("--%s"%name, dest=name,
help=desc)
return parser, interface
def run_instance(interface, options):
if interface:
print "setting function inputs"
for input_name, _ in interface.inputs.items():
if getattr(options, input_name) != None:
value = getattr(options, input_name)
#traits cannot cast from string to float or int
try:
value = float(value)
except:
pass
#try to cast string input to boolean
try:
value = str2bool(value)
except:
pass
try:
setattr(interface.inputs, input_name,
value)
except ValueError, e:
print "Error when setting the value of %s: '%s'"%(input_name, str(e))
print interface.inputs
res = interface.run()
print res.outputs
def main(argv):
if len(argv) == 2 and not argv[1].startswith("-"):
listClasses(argv[1])
sys.exit(0)
parser = argparse.ArgumentParser(description='Nipype interface runner', prog=argv[0])
parser.add_argument("module", type=str, help="Module name")
parser.add_argument("interface", type=str, help="Interface name")
parsed = parser.parse_args(args=argv[1:3])
_, prog = os.path.split(argv[0])
interface_parser = argparse.ArgumentParser(description="Run %s"%parsed.interface, prog=" ".join([prog] + argv[1:3]))
interface_parser, interface = add_options(interface_parser, parsed.module, parsed.interface)
args = interface_parser.parse_args(args=argv[3:])
run_instance(interface, args)
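# Illustrative invocations (not part of the original module; the module and
# interface names are examples only):
#   main(['nipype_cmd', 'nipype.interfaces.fsl'])          # list Interfaces
#   main(['nipype_cmd', 'nipype.interfaces.fsl', 'BET', '--help'])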
|
{
"content_hash": "eff7cf76bc42667b244d7877cbc9b140",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 120,
"avg_line_length": 36.53947368421053,
"alnum_prop": 0.5797623334533669,
"repo_name": "glatard/nipype",
"id": "f215bfccee696ba183031e267a837c11c389e1d7",
"size": "2777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/utils/nipype_cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4606241"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
"""
Forms and validation code for user registration.
"""
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attrs_dict),
label=_("Username"),
error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_("E-mail"))
password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password"))
password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
label=_("Password (again)"))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
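# Illustrative usage (not part of the original module; the field values are
# made up):
#   form = RegistrationForm(data={'username': 'alice',
#                                 'email': 'alice@example.com',
#                                 'password1': 's3cret',
#                                 'password2': 's3cret'})
#   form.is_valid()  # runs clean_username() and clean() defined above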
class RegistrationFormTermsOfService(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which adds a required checkbox
for agreeing to a site's Terms of Service.
"""
tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
label=_(u'I have read and agree to the Terms of Service'),
error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which enforces uniqueness of
email addresses.
"""
def clean_email(self):
"""
Validate that the supplied email address is unique for the
site.
"""
if User.objects.filter(email__iexact=self.cleaned_data['email']):
raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
"""
Subclass of ``RegistrationForm`` which disallows registration with
email addresses from popular free webmail services; moderately
useful for preventing automated spam registrations.
To change the list of banned domains, subclass this form and
override the attribute ``bad_domains``.
"""
bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
'googlemail.com', 'hotmail.com', 'hushmail.com',
'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
'yahoo.com']
def clean_email(self):
"""
Check the supplied email address against a list of known free
webmail domains.
"""
email_domain = self.cleaned_data['email'].split('@')[1]
if email_domain in self.bad_domains:
raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
return self.cleaned_data['email']
|
{
"content_hash": "da2e02a9bf7f55d365014cf2e99c8543",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 141,
"avg_line_length": 39.078125,
"alnum_prop": 0.6165533786485405,
"repo_name": "gminds/rapidnewsng",
"id": "c22627096394038ec827f812e143246634095656",
"size": "5002",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "registration/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "122626"
},
{
"name": "HTML",
"bytes": "148322"
},
{
"name": "JavaScript",
"bytes": "125770"
},
{
"name": "Python",
"bytes": "6331387"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
"""Tests for base google test functionality."""
__author__ = 'dborowitz@google.com (Dave Borowitz)'
import os
import re
import string
import sys
import unittest
import gflags as flags
from google.apputils import basetest
PY_VERSION_2 = sys.version_info[0] == 2
FLAGS = flags.FLAGS
flags.DEFINE_integer('testid', 0, 'Which test to run')
_OUTPUT_CAPTURING_CASES = [
(basetest.CaptureTestStdout, basetest.DiffTestStdout, sys.stdout),
(basetest.CaptureTestStderr, basetest.DiffTestStderr, sys.stderr),
]
class CaptureTestStdoutStderrTest(basetest.TestCase):
def setUp(self):
self.expected_filepath = os.path.join(FLAGS.test_tmpdir, 'expected_output')
def testStdoutCapturedSuccessfully(self):
for capture_output_fn, diff_output_fn, ostream in _OUTPUT_CAPTURING_CASES:
capture_output_fn()
ostream.write('This gets captured\n')
with open(self.expected_filepath, 'wb') as expected_file:
expected_file.write(b'This gets captured\n')
diff_output_fn(self.expected_filepath) # should do nothing
def testRaisesWhenCapturedStdoutDifferentThanExpected(self):
for capture_output_fn, diff_output_fn, ostream in _OUTPUT_CAPTURING_CASES:
capture_output_fn()
ostream.write('Correct captured.out\n')
with open(self.expected_filepath, 'wb') as expected_file:
expected_file.write(b'Incorrect captured.out\n')
self.assertRaises(basetest.OutputDifferedError,
diff_output_fn, self.expected_filepath)
def testStdoutNoLongerCapturedAfterDiffTest(self):
for capture_output_fn, diff_output_fn, ostream in _OUTPUT_CAPTURING_CASES:
with open(self.expected_filepath, 'wb') as expected_file:
expected_file.write(b'This goes to captured.out\n')
capture_output_fn()
ostream.write('This goes to captured.out\n')
diff_output_fn(self.expected_filepath) # should do nothing
ostream.write('This goes to stdout screen\n')
capture_output_fn()
ostream.write('This goes to captured.out\n')
diff_output_fn(self.expected_filepath) # should do nothing
def testCapturingTestStdoutReturnsContextManager(self):
for capture_output_fn, _, ostream in _OUTPUT_CAPTURING_CASES:
with open(self.expected_filepath, 'wb') as expected_file:
expected_file.write(b'This goes to captured.out\n')
ostream.write('This goes to stdout screen\n')
with capture_output_fn(
expected_output_filepath=self.expected_filepath):
ostream.write('This goes to captured.out\n')
class GoogleTestBaseUnitTest(basetest.TestCase):
def setUp(self):
self._orig_test_diff = os.environ.pop('TEST_DIFF', None)
self.data1_file = os.path.join(FLAGS.test_tmpdir, 'provided_1.dat')
self.data2_file = os.path.join(FLAGS.test_tmpdir, 'provided_2.dat')
def tearDown(self):
if self._orig_test_diff is not None:
os.environ['TEST_DIFF'] = self._orig_test_diff
def test_Diff_SameData(self):
"""Tests for the internal _Diff method."""
basetest._WriteTestData('a\nb\n', self.data1_file)
basetest._WriteTestData('a\nb\n', self.data2_file)
# This must not raise an exception:
basetest._Diff(self.data1_file, self.data2_file)
@unittest.skipIf(not os.path.exists('/usr/bin/diff'),
'requires /usr/bin/diff')
def test_Diff_SameData_ExternalDiff(self):
"""Test the internal _Diff method when TEST_DIFF is in the env."""
os.environ['TEST_DIFF'] = '/usr/bin/diff'
basetest._WriteTestData('b\n', self.data1_file)
basetest._WriteTestData('b\n', self.data2_file)
# This must not raise an exception:
basetest._Diff(self.data1_file, self.data2_file)
@unittest.skipIf(not os.path.exists('/usr/bin/diff'),
'requires /usr/bin/diff')
def test_Diff_MissingFile_ExternalDiff(self):
"""Test the internal _Diff method on TEST_DIFF error."""
os.environ['TEST_DIFF'] = '/usr/bin/diff'
basetest._WriteTestData('a\n', self.data1_file)
if os.path.exists(self.data2_file):
os.unlink(self.data2_file) # Be 100% sure this does not exist.
# This depends on /usr/bin/diff returning an exit code greater than 1
# when an input file is missing. It has had this behavior forever.
with self.assertRaises(basetest.DiffFailureError) as error_context:
basetest._Diff(self.data1_file, self.data2_file)
def test_Diff_MissingExternalDiff(self):
"""Test the internal _Diff when TEST_DIFF program is non-existant."""
os.environ['TEST_DIFF'] = self.data1_file
if os.path.exists(self.data1_file):
os.unlink(self.data1_file) # Be 100% sure this does not exist
with self.assertRaises(basetest.DiffFailureError) as error_context:
basetest._Diff(self.data2_file, self.data2_file)
def test_Diff_Exception(self):
"""Test that _Diff includes the delta in the error msg."""
basetest._WriteTestData(b'01: text A\n02: text B\n03: C', self.data1_file)
basetest._WriteTestData(b'01: text A\n02: zzzzzz\n03: C', self.data2_file)
with self.assertRaises(basetest.OutputDifferedError) as error_context:
basetest._Diff(self.data1_file, self.data2_file)
# Check that both filenames and some semblance of a unified diff
# are present in the exception error message.
diff_error_message = str(error_context.exception)
self.assertIn('provided_1', diff_error_message)
self.assertIn('provided_2', diff_error_message)
self.assertIn('@@', diff_error_message)
self.assertIn('02: text B', diff_error_message)
@unittest.skipIf(not os.path.exists('/usr/bin/diff'),
'requires /usr/bin/diff')
def test_Diff_Exception_ExternalDiff(self):
"""Test that _Diff executes TEST_DIFF when supplied and there are diffs."""
os.environ['TEST_DIFF'] = '/usr/bin/diff'
basetest._WriteTestData(b'01: text A\n02: text B\n03: C', self.data1_file)
basetest._WriteTestData(b'01: text A\n02: zzzzzz\n03: C', self.data2_file)
with self.assertRaises(basetest.OutputDifferedError) as error_context:
basetest._Diff(self.data1_file, self.data2_file)
# Check that both filenames and the TEST_DIFF command
# are present in the exception error message.
diff_error_message = str(error_context.exception)
self.assertIn('/usr/bin/diff', diff_error_message)
self.assertIn('provided_1', diff_error_message)
self.assertIn('provided_2', diff_error_message)
def testDiffTestStrings(self):
basetest.DiffTestStrings('a', 'a')
with self.assertRaises(basetest.OutputDifferedError):
basetest.DiffTestStrings(
'-2: a message\n-2: another message\n',
'-2: a message\n-2: another message \n')
self.assertRaises(basetest.DiffFailureError, basetest.DiffTestStringFile,
'a message', 'txt.a message not existant file here')
self.assertRaises(basetest.OutputDifferedError, basetest.DiffTestStringFile,
'message', os.devnull)
def testFlags(self):
if FLAGS.testid == 1:
self.assertEqual(FLAGS.test_random_seed, 301)
self.assert_(FLAGS.test_tmpdir.startswith('/'))
self.assert_(os.access(FLAGS.test_tmpdir, os.W_OK))
elif FLAGS.testid == 2:
self.assertEqual(FLAGS.test_random_seed, 321)
self.assertEqual(FLAGS.test_srcdir, 'cba')
self.assertEqual(FLAGS.test_tmpdir, 'fed')
elif FLAGS.testid == 3:
self.assertEqual(FLAGS.test_random_seed, 123)
self.assertEqual(FLAGS.test_srcdir, 'abc')
self.assertEqual(FLAGS.test_tmpdir, 'def')
elif FLAGS.testid == 4:
self.assertEqual(FLAGS.test_random_seed, 123)
self.assertEqual(FLAGS.test_srcdir, 'abc')
self.assertEqual(FLAGS.test_tmpdir, 'def')
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(AssertionError, self.assertIn, 'x', 'abc')
self.assertRaises(AssertionError, self.assertIn, 4, [1, 2, 3])
self.assertRaises(AssertionError, self.assertIn, 'elephant', animals)
self.assertRaises(AssertionError, self.assertNotIn, 'c', 'abc')
self.assertRaises(AssertionError, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(AssertionError, self.assertNotIn, 'cow', animals)
@basetest.unittest.expectedFailure
def testExpectedFailure(self):
if FLAGS.testid == 7:
self.assertEqual(1, 1) # expected failure, got success
else:
self.assertEqual(1, 2) # the expected failure
@basetest.unittest.expectedFailure
def testDifferentExpectedFailure(self):
if FLAGS.testid == 8:
self.assertEqual(1, 1) # expected failure, got success
else:
self.assertEqual(1, 2) # the expected failure
def testAssertEqual(self):
if FLAGS.testid != 5:
return
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(basetest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(basetest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(basetest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(AssertionError, self.assertListEqual, a, tuple(b))
self.assertRaises(AssertionError, self.assertTupleEqual, tuple(a), b)
self.assertRaises(AssertionError, self.assertListEqual, None, b)
self.assertRaises(AssertionError, self.assertTupleEqual, None, tuple(b))
self.assertRaises(AssertionError, self.assertSequenceEqual, None, tuple(b))
self.assertRaises(AssertionError, self.assertListEqual, 1, 1)
self.assertRaises(AssertionError, self.assertTupleEqual, 1, 1)
self.assertRaises(AssertionError, self.assertSequenceEqual, 1, 1)
self.assertSameElements([1, 2, 3], [3, 2, 1])
self.assertSameElements([1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertSameElements(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertRaises(AssertionError, self.assertSameElements, [10], [10, 11])
self.assertRaises(AssertionError, self.assertSameElements, [10, 11], [10])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertSameElements([[1, 2], [3, 4]], [[3, 4], [1, 2]])
if PY_VERSION_2:
# dict's are no longer valid for < comparison in Python 3 making them
# unsortable (yay, sanity!). But we need to preserve this old behavior
# when running under Python 2.
self.assertSameElements([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
self.assertRaises(AssertionError, self.assertSameElements, [[1]], [[2]])
def testAssertItemsEqualHotfix(self):
"""Confirm that http://bugs.python.org/issue14832 - b/10038517 is gone."""
for assert_items_method in (self.assertItemsEqual, self.assertCountEqual):
with self.assertRaises(self.failureException) as error_context:
assert_items_method([4], [2])
error_message = str(error_context.exception)
# Confirm that the bug is either no longer present in Python or that our
# assertItemsEqual patching version of the method in basetest.TestCase
# doesn't get used.
self.assertIn('First has 1, Second has 0: 4', error_message)
self.assertIn('First has 0, Second has 1: 2', error_message)
def testAssertDictEqual(self):
self.assertDictEqual({}, {})
c = {'x': 1}
d = {}
self.assertRaises(basetest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(basetest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(AssertionError, self.assertDictEqual, None, d)
self.assertRaises(AssertionError, self.assertDictEqual, [], d)
self.assertRaises(AssertionError, self.assertDictEqual, 1, 1)
try:
# Ensure we use equality as the sole measure of elements, not type, since
# that is consistent with dict equality.
self.assertDictEqual({1: 1.0, 2: 2}, {1: 1, 2: 3})
except AssertionError as e:
self.assertMultiLineEqual('{1: 1.0, 2: 2} != {1: 1, 2: 3}\n'
'repr() of differing entries:\n2: 2 != 3\n',
str(e))
try:
self.assertDictEqual({}, {'x': 1})
except AssertionError as e:
self.assertMultiLineEqual("{} != {'x': 1}\n"
"Unexpected, but present entries:\n'x': 1\n",
str(e))
else:
self.fail('Expecting AssertionError')
try:
self.assertDictEqual({}, {'x': 1}, 'a message')
except AssertionError as e:
self.assertIn('a message', str(e))
else:
self.fail('Expecting AssertionError')
expected = {'a': 1, 'b': 2, 'c': 3}
seen = {'a': 2, 'c': 3, 'd': 4}
try:
self.assertDictEqual(expected, seen)
except AssertionError as e:
self.assertMultiLineEqual("""\
{'a': 1, 'b': 2, 'c': 3} != {'a': 2, 'c': 3, 'd': 4}
Unexpected, but present entries:
'd': 4
repr() of differing entries:
'a': 1 != 2
Missing entries:
'b': 2
""", str(e))
else:
self.fail('Expecting AssertionError')
self.assertRaises(AssertionError, self.assertDictEqual, (1, 2), {})
self.assertRaises(AssertionError, self.assertDictEqual, {}, (1, 2))
# Ensure deterministic output of keys in dictionaries whose sort order
# doesn't match the lexical ordering of repr -- this is most Python objects,
# which are keyed by memory address.
class Obj(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
try:
self.assertDictEqual(
{'a': Obj('A'), Obj('b'): Obj('B'), Obj('c'): Obj('C')},
{'a': Obj('A'), Obj('d'): Obj('D'), Obj('e'): Obj('E')})
except AssertionError as e:
# Do as best we can not to be misleading when objects have the same repr
# but aren't equal.
err_str = str(e)
self.assertStartsWith(err_str,
"{'a': A, b: B, c: C} != {'a': A, d: D, e: E}\n")
self.assertRegexpMatches(err_str,
r'(?ms).*^Unexpected, but present entries:\s+'
r'^(d: D$\s+^e: E|e: E$\s+^d: D)$')
self.assertRegexpMatches(err_str,
r'(?ms).*^repr\(\) of differing entries:\s+'
r'^.a.: A != A$', err_str)
self.assertRegexpMatches(err_str,
r'(?ms).*^Missing entries:\s+'
r'^(b: B$\s+^c: C|c: C$\s+^b: B)$')
else:
self.fail('Expecting AssertionError')
# Confirm that safe_repr, not repr, is being used.
class RaisesOnRepr(object):
def __repr__(self):
return 1/0 # Intentionally broken __repr__ implementation.
try:
self.assertDictEqual(
{RaisesOnRepr(): RaisesOnRepr()},
{RaisesOnRepr(): RaisesOnRepr()}
)
self.fail('Expected dicts not to match')
except AssertionError as e:
# Depending on the testing environment, the object may get a __main__
# prefix or a basetest_test prefix, so strip that for comparison.
error_msg = re.sub(
r'( at 0x[^>]+)|__main__\.|basetest_test\.', '', str(e))
self.assertRegexpMatches(error_msg, """(?m)\
{<.*RaisesOnRepr object.*>: <.*RaisesOnRepr object.*>} != \
{<.*RaisesOnRepr object.*>: <.*RaisesOnRepr object.*>}
Unexpected, but present entries:
<.*RaisesOnRepr object.*>: <.*RaisesOnRepr object.*>
Missing entries:
<.*RaisesOnRepr object.*>: <.*RaisesOnRepr object.*>
""")
# Confirm that the mismatch is still reported when the dict keys cannot be ordered.
class RaisesOnLt(object):
def __lt__(self):
raise TypeError('Object is unordered.')
def __repr__(self):
return '<RaisesOnLt object>'
try:
self.assertDictEqual(
{RaisesOnLt(): RaisesOnLt()},
{RaisesOnLt(): RaisesOnLt()})
except AssertionError as e:
self.assertIn('Unexpected, but present entries:\n<RaisesOnLt', str(e))
self.assertIn('Missing entries:\n<RaisesOnLt', str(e))
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(AssertionError, self.assertSetEqual, None, set2)
self.assertRaises(AssertionError, self.assertSetEqual, [], set2)
self.assertRaises(AssertionError, self.assertSetEqual, set1, None)
self.assertRaises(AssertionError, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(AssertionError, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(AssertionError, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(AssertionError, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = 'foo'
self.assertRaises(AssertionError, self.assertSetEqual, set1, set2)
self.assertRaises(AssertionError, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(AssertionError, self.assertSetEqual, set1, set2)
def testAssertDictContainsSubset(self):
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
self.assertRaises(basetest.TestCase.failureException,
self.assertDictContainsSubset, {'a': 2}, {'a': 1},
'.*Mismatched values:.*')
self.assertRaises(basetest.TestCase.failureException,
self.assertDictContainsSubset, {'c': 1}, {'a': 1},
'.*Missing:.*')
self.assertRaises(basetest.TestCase.failureException,
self.assertDictContainsSubset, {'a': 1, 'c': 1}, {'a': 1},
'.*Missing:.*')
self.assertRaises(basetest.TestCase.failureException,
self.assertDictContainsSubset, {'a': 1, 'c': 1}, {'a': 1},
'.*Missing:.*Mismatched values:.*')
def testAssertContainsSubset(self):
# sets, lists, tuples, dicts all ok. Types of set and subset do not have to
# match.
actual = ('a', 'b', 'c')
self.assertContainsSubset({'a', 'b'}, actual)
self.assertContainsSubset(('b', 'c'), actual)
self.assertContainsSubset({'b': 1, 'c': 2}, list(actual))
self.assertContainsSubset(['c', 'a'], set(actual))
self.assertContainsSubset([], set())
self.assertContainsSubset([], {'a': 1})
self.assertRaises(AssertionError, self.assertContainsSubset, ('d',), actual)
self.assertRaises(AssertionError, self.assertContainsSubset, ['d'],
set(actual))
self.assertRaises(AssertionError, self.assertContainsSubset, {'a': 1}, [])
self.assertRaisesWithRegexpMatch(AssertionError, 'Missing elements',
self.assertContainsSubset, {1, 2, 3},
{1, 2})
self.assertRaisesWithRegexpMatch(
AssertionError, 'Custom message: Missing elements',
self.assertContainsSubset, {1, 2}, {1}, 'Custom message')
def testAssertNoCommonElements(self):
actual = ('a', 'b', 'c')
self.assertNoCommonElements((), actual)
self.assertNoCommonElements(('d', 'e'), actual)
self.assertNoCommonElements({'d', 'e'}, actual)
self.assertRaisesWithRegexpMatch(
AssertionError, 'Custom message: Common elements',
self.assertNoCommonElements, {1, 2}, {1}, 'Custom message')
with self.assertRaises(AssertionError):
self.assertNoCommonElements(['a'], actual)
with self.assertRaises(AssertionError):
self.assertNoCommonElements({'a', 'b', 'c'}, actual)
with self.assertRaises(AssertionError):
self.assertNoCommonElements({'b', 'c'}, set(actual))
def testAssertAlmostEqual(self):
if FLAGS.testid != 6:
return
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
def testAssertAlmostEqualsWithDelta(self):
self.assertAlmostEquals(3.14, 3, delta=0.2)
self.assertAlmostEquals(2.81, 3.14, delta=1)
self.assertAlmostEquals(-1, 1, delta=3)
self.assertRaises(AssertionError, self.assertAlmostEquals,
3.14, 2.81, delta=0.1)
self.assertRaises(AssertionError, self.assertAlmostEquals,
1, 2, delta=0.5)
self.assertNotAlmostEquals(3.14, 2.81, delta=0.1)
def testGetCommandString_listOfStringArgument(self):
expected = "'command' 'arg-0'"
observed = basetest.GetCommandString(['command', 'arg-0'])
self.assertEqual(expected, observed)
def testGetCommandString_listOfUnicodeStringArgument(self):
expected = "'command' 'arg-0'"
observed = basetest.GetCommandString([u'command', u'arg-0'])
self.assertEqual(expected, observed)
def testGetCommandString_stringArgument(self):
expected = 'command arg-0'
observed = basetest.GetCommandString('command arg-0')
self.assertEqual(expected, observed)
def testGetCommandString_unicodeStringArgument(self):
expected = 'command arg-0'
observed = basetest.GetCommandString(u'command arg-0')
self.assertEqual(expected, observed)
def testAssertStartsWith(self):
self.assertStartsWith('foobar', 'foo')
self.assertStartsWith('foobar', 'foobar')
self.assertRaises(AssertionError, self.assertStartsWith, 'foobar', 'bar')
self.assertRaises(AssertionError, self.assertStartsWith, 'foobar', 'blah')
def testAssertNotStartsWith(self):
self.assertNotStartsWith('foobar', 'bar')
self.assertNotStartsWith('foobar', 'blah')
self.assertRaises(AssertionError, self.assertNotStartsWith, 'foobar', 'foo')
self.assertRaises(AssertionError, self.assertNotStartsWith, 'foobar',
'foobar')
def testAssertEndsWith(self):
self.assertEndsWith('foobar', 'bar')
self.assertEndsWith('foobar', 'foobar')
self.assertRaises(AssertionError, self.assertEndsWith, 'foobar', 'foo')
self.assertRaises(AssertionError, self.assertEndsWith, 'foobar', 'blah')
def testAssertNotEndsWith(self):
self.assertNotEndsWith('foobar', 'foo')
self.assertNotEndsWith('foobar', 'blah')
self.assertRaises(AssertionError, self.assertNotEndsWith, 'foobar', 'bar')
self.assertRaises(AssertionError, self.assertNotEndsWith, 'foobar',
'foobar')
def testAssertRegexMatch_matches(self):
self.assertRegexMatch('str', ['str'])
def testAssertRegexMatch_matchesSubstring(self):
self.assertRegexMatch('pre-str-post', ['str'])
def testAssertRegexMatch_multipleRegexMatches(self):
self.assertRegexMatch('str', ['rts', 'str'])
def testAssertRegexMatch_emptyListFails(self):
expected_re = re.compile(r'No regexes specified\.', re.MULTILINE)
self.assertRaisesWithRegexpMatch(
AssertionError,
expected_re,
self.assertRegexMatch,
'str',
regexes=[])
def testAssertRegexMatch_badArguments(self):
self.assertRaisesWithRegexpMatch(
AssertionError,
'regexes is a string;.*',
self.assertRegexMatch, '1.*2', '1 2')
def testAssertRegexMatch_unicodeVsBytes(self):
"""Ensure proper utf-8 encoding or decoding happens automatically."""
self.assertRegexMatch(u'str', [b'str'])
self.assertRegexMatch(b'str', [u'str'])
def testAssertRegexMatch_unicode(self):
self.assertRegexMatch(u'foo str', [u'str'])
def testAssertRegexMatch_bytes(self):
self.assertRegexMatch(b'foo str', [b'str'])
def testAssertRegexMatch_allTheSameType(self):
self.assertRaisesWithRegexpMatch(
AssertionError, 'regexes .* same type',
self.assertRegexMatch, 'foo str', [b'str', u'foo'])
def testAssertCommandFailsStderr(self):
# TODO(user): Gross! These should use sys.executable instead of
# depending on /usr/bin/perl existing.
self.assertCommandFails(
['/usr/bin/perl', '-e', 'die "FAIL";'],
[r'(.|\n)*FAIL at -e line 1\.'])
def testAssertCommandFailsWithListOfString(self):
self.assertCommandFails(['false'], [''])
def testAssertCommandFailsWithListOfUnicodeString(self):
self.assertCommandFails([u'false'], [''])
def testAssertCommandFailsWithUnicodeString(self):
self.assertCommandFails(u'false', [u''])
def testAssertCommandFailsWithUnicodeStringBytesRegex(self):
self.assertCommandFails(u'false', [b''])
def testAssertCommandSucceedsStderr(self):
expected_re = re.compile(r'(.|\n)*FAIL at -e line 1\.', re.MULTILINE)
self.assertRaisesWithRegexpMatch(
AssertionError,
expected_re,
self.assertCommandSucceeds,
['/usr/bin/perl', '-e', 'die "FAIL";'])
def testAssertCommandSucceedsWithMatchingUnicodeRegexes(self):
self.assertCommandSucceeds(['echo', 'SUCCESS'], regexes=[u'SUCCESS'])
def testAssertCommandSucceedsWithMatchingBytesRegexes(self):
self.assertCommandSucceeds(['echo', 'SUCCESS'], regexes=[b'SUCCESS'])
def testAssertCommandSucceedsWithNonMatchingRegexes(self):
expected_re = re.compile(r'Running command', re.MULTILINE)
self.assertRaisesWithRegexpMatch(
AssertionError,
expected_re,
self.assertCommandSucceeds,
['echo', 'FAIL'],
regexes=['SUCCESS'])
def testAssertCommandSucceedsWithListOfString(self):
self.assertCommandSucceeds(['true'])
def testAssertCommandSucceedsWithListOfUnicodeString(self):
self.assertCommandSucceeds([u'true'])
def testAssertCommandSucceedsWithUnicodeString(self):
self.assertCommandSucceeds(u'true')
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(AssertionError, self.assertGreater, 1, 2)
self.assertRaises(AssertionError, self.assertGreater, 1, 1)
self.assertRaises(AssertionError, self.assertGreaterEqual, 1, 2)
self.assertRaises(AssertionError, self.assertLess, 2, 1)
self.assertRaises(AssertionError, self.assertLess, 1, 1)
self.assertRaises(AssertionError, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(AssertionError, self.assertGreater, 1.0, 1.1)
self.assertRaises(AssertionError, self.assertGreater, 1.0, 1.0)
self.assertRaises(AssertionError, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(AssertionError, self.assertLess, 1.1, 1.0)
self.assertRaises(AssertionError, self.assertLess, 1.0, 1.0)
self.assertRaises(AssertionError, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(AssertionError, self.assertGreater, 'ant', 'bug')
self.assertRaises(AssertionError, self.assertGreater, 'ant', 'ant')
self.assertRaises(AssertionError, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(AssertionError, self.assertLess, 'bug', 'ant')
self.assertRaises(AssertionError, self.assertLess, 'ant', 'ant')
self.assertRaises(AssertionError, self.assertLessEqual, 'bug', 'ant')
# Try Unicode
self.assertGreater(u'bug', u'ant')
self.assertGreaterEqual(u'bug', u'ant')
self.assertGreaterEqual(u'ant', u'ant')
self.assertLess(u'ant', u'bug')
self.assertLessEqual(u'ant', u'bug')
self.assertLessEqual(u'ant', u'ant')
self.assertRaises(AssertionError, self.assertGreater, u'ant', u'bug')
self.assertRaises(AssertionError, self.assertGreater, u'ant', u'ant')
self.assertRaises(AssertionError, self.assertGreaterEqual, u'ant', u'bug')
self.assertRaises(AssertionError, self.assertLess, u'bug', u'ant')
self.assertRaises(AssertionError, self.assertLess, u'ant', u'ant')
self.assertRaises(AssertionError, self.assertLessEqual, u'bug', u'ant')
# Try Mixed String/Unicode
self.assertGreater('bug', u'ant')
self.assertGreater(u'bug', 'ant')
self.assertGreaterEqual('bug', u'ant')
self.assertGreaterEqual(u'bug', 'ant')
self.assertGreaterEqual('ant', u'ant')
self.assertGreaterEqual(u'ant', 'ant')
self.assertLess('ant', u'bug')
self.assertLess(u'ant', 'bug')
self.assertLessEqual('ant', u'bug')
self.assertLessEqual(u'ant', 'bug')
self.assertLessEqual('ant', u'ant')
self.assertLessEqual(u'ant', 'ant')
self.assertRaises(AssertionError, self.assertGreater, 'ant', u'bug')
self.assertRaises(AssertionError, self.assertGreater, u'ant', 'bug')
self.assertRaises(AssertionError, self.assertGreater, 'ant', u'ant')
self.assertRaises(AssertionError, self.assertGreater, u'ant', 'ant')
self.assertRaises(AssertionError, self.assertGreaterEqual, 'ant', u'bug')
self.assertRaises(AssertionError, self.assertGreaterEqual, u'ant', 'bug')
self.assertRaises(AssertionError, self.assertLess, 'bug', u'ant')
self.assertRaises(AssertionError, self.assertLess, u'bug', 'ant')
self.assertRaises(AssertionError, self.assertLess, 'ant', u'ant')
self.assertRaises(AssertionError, self.assertLess, u'ant', 'ant')
self.assertRaises(AssertionError, self.assertLessEqual, 'bug', u'ant')
self.assertRaises(AssertionError, self.assertLessEqual, u'bug', 'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
for type1 in (str, unicode):
for type2 in (str, unicode):
self.assertRaisesWithLiteralMatch(AssertionError, sample_text_error,
self.assertMultiLineEqual,
type1(sample_text),
type2(revised_sample_text))
self.assertRaises(AssertionError, self.assertMultiLineEqual, (1, 2), 'str')
self.assertRaises(AssertionError, self.assertMultiLineEqual, 'str', (1, 2))
def testAssertMultiLineEqualAddsNewlinesIfNeeded(self):
self.assertRaisesWithLiteralMatch(
AssertionError,
'\n'
' line1\n'
'- line2\n'
'? ^\n'
'+ line3\n'
'? ^\n',
self.assertMultiLineEqual,
'line1\n'
'line2',
'line1\n'
'line3')
def testAssertMultiLineEqualShowsMissingNewlines(self):
self.assertRaisesWithLiteralMatch(
AssertionError,
'\n'
' line1\n'
'- line2\n'
'? -\n'
'+ line2\n',
self.assertMultiLineEqual,
'line1\n'
'line2\n',
'line1\n'
'line2')
def testAssertMultiLineEqualShowsExtraNewlines(self):
self.assertRaisesWithLiteralMatch(
AssertionError,
'\n'
' line1\n'
'- line2\n'
'+ line2\n'
'? +\n',
self.assertMultiLineEqual,
'line1\n'
'line2',
'line1\n'
'line2\n')
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(AssertionError, self.assertIsNone, False)
self.assertIsNotNone('Google')
self.assertRaises(AssertionError, self.assertIsNotNone, None)
self.assertRaises(AssertionError, self.assertIsNone, (1, 2))
def testAssertIs(self):
self.assertIs(object, object)
self.assertRaises(AssertionError, self.assertIsNot, object, object)
self.assertIsNot(True, False)
self.assertRaises(AssertionError, self.assertIs, True, False)
def testAssertBetween(self):
self.assertBetween(3.14, 3.1, 3.141)
self.assertBetween(4, 4, 1e10000)
self.assertBetween(9.5, 9.4, 9.5)
self.assertBetween(-1e10, -1e10000, 0)
self.assertRaises(AssertionError, self.assertBetween, 9.4, 9.3, 9.3999)
self.assertRaises(AssertionError, self.assertBetween, -1e10000, -1e10, 0)
def testAssertRaisesWithPredicateMatch_noRaiseFails(self):
with self.assertRaisesRegexp(AssertionError, '^Exception not raised$'):
self.assertRaisesWithPredicateMatch(Exception,
lambda e: True,
lambda: 1) # don't raise
with self.assertRaisesRegexp(AssertionError, '^Exception not raised$'):
with self.assertRaisesWithPredicateMatch(Exception, lambda e: True):
pass # don't raise
def testAssertRaisesWithPredicateMatch_raisesWrongExceptionFails(self):
def _RaiseValueError():
raise ValueError
with self.assertRaises(ValueError):
self.assertRaisesWithPredicateMatch(IOError,
lambda e: True,
_RaiseValueError)
with self.assertRaises(ValueError):
with self.assertRaisesWithPredicateMatch(IOError, lambda e: True):
raise ValueError
def testAssertRaisesWithPredicateMatch_predicateFails(self):
def _RaiseValueError():
raise ValueError
with self.assertRaisesRegexp(AssertionError, ' does not match predicate '):
self.assertRaisesWithPredicateMatch(ValueError,
lambda e: False,
_RaiseValueError)
with self.assertRaisesRegexp(AssertionError, ' does not match predicate '):
with self.assertRaisesWithPredicateMatch(ValueError, lambda e: False):
raise ValueError
def testAssertRaisesWithPredicateMatch_predicatePasses(self):
def _RaiseValueError():
raise ValueError
self.assertRaisesWithPredicateMatch(ValueError,
lambda e: True,
_RaiseValueError)
with self.assertRaisesWithPredicateMatch(ValueError, lambda e: True):
raise ValueError
def testAssertRaisesWithRegexpMatch(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesWithRegexpMatch(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesWithRegexpMatch(ExceptionMock, 'expect$', Stub)
self.assertRaisesWithRegexpMatch(ExceptionMock, u'expect$', Stub)
def testAssertNotRaisesWithRegexpMatch(self):
self.assertRaisesWithRegexpMatch(
AssertionError, '^Exception not raised',
self.assertRaisesWithRegexpMatch, Exception, re.compile('x'),
lambda: None)
self.assertRaisesWithRegexpMatch(
AssertionError, '^Exception not raised',
self.assertRaisesWithRegexpMatch, Exception, 'x', lambda: None)
self.assertRaisesWithRegexpMatch(
AssertionError, '^Exception not raised',
self.assertRaisesWithRegexpMatch, Exception, u'x', lambda: None)
def testAssertRaisesWithRegexpMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesWithRegexpMatch(
AssertionError, r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesWithRegexpMatch, Exception, r'^Expected$', Stub)
self.assertRaisesWithRegexpMatch(
AssertionError, r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesWithRegexpMatch, Exception, r'^Expected$', Stub)
def testAssertContainsInOrder(self):
# Valids
self.assertContainsInOrder(
['fox', 'dog'], 'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
['quick', 'fox', 'dog'],
'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
['The', 'fox', 'dog.'], 'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
['fox'], 'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
'fox', 'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
['fox', 'dog'], 'fox dog fox')
self.assertContainsInOrder(
[], 'The quick brown fox jumped over the lazy dog.')
self.assertContainsInOrder(
[], '')
# Invalids
self.assertRaises(
AssertionError, self.assertContainsInOrder,
['dog', 'fox'], 'The quick brown fox jumped over the lazy dog')
self.assertRaises(
AssertionError, self.assertContainsInOrder,
['The', 'dog', 'fox'], 'The quick brown fox jumped over the lazy dog')
self.assertRaises(
AssertionError, self.assertContainsInOrder, ['dog'], '')
def testAssertContainsSubsequenceForNumbers(self):
self.assertContainsSubsequence([1, 2, 3], [1])
self.assertContainsSubsequence([1, 2, 3], [1, 2])
self.assertContainsSubsequence([1, 2, 3], [1, 3])
with self.assertRaises(AssertionError):
self.assertContainsSubsequence([1, 2, 3], [4])
with self.assertRaises(AssertionError):
self.assertContainsSubsequence([1, 2, 3], [3, 1])
def testAssertContainsSubsequenceForStrings(self):
self.assertContainsSubsequence(['foo', 'bar', 'blorp'], ['foo', 'blorp'])
with self.assertRaises(AssertionError):
self.assertContainsSubsequence(
['foo', 'bar', 'blorp'], ['blorp', 'foo'])
def testAssertContainsSubsequenceWithEmptySubsequence(self):
self.assertContainsSubsequence([1, 2, 3], [])
self.assertContainsSubsequence(['foo', 'bar', 'blorp'], [])
self.assertContainsSubsequence([], [])
def testAssertContainsSubsequenceWithEmptyContainer(self):
with self.assertRaises(AssertionError):
self.assertContainsSubsequence([], [1])
with self.assertRaises(AssertionError):
self.assertContainsSubsequence([], ['foo'])
def testAssertTotallyOrdered(self):
# Valid.
self.assertTotallyOrdered()
self.assertTotallyOrdered([1])
self.assertTotallyOrdered([1], [2])
self.assertTotallyOrdered([1, 1, 1])
self.assertTotallyOrdered([(1, 1)], [(1, 2)], [(2, 1)])
if PY_VERSION_2:
# In Python 3 comparing different types of elements is not supported.
self.assertTotallyOrdered([None], [1], [2])
self.assertTotallyOrdered([1, 1, 1], ['a string'])
# From the docstring.
class A(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash(self.x)
def __repr__(self):
return 'A(%r, %r)' % (self.x, self.y)
def __eq__(self, other):
try:
return self.x == other.x
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return self.x != other.x
except AttributeError:
return NotImplemented
def __lt__(self, other):
try:
return self.x < other.x
except AttributeError:
return NotImplemented
def __le__(self, other):
try:
return self.x <= other.x
except AttributeError:
return NotImplemented
def __gt__(self, other):
try:
return self.x > other.x
except AttributeError:
return NotImplemented
def __ge__(self, other):
try:
return self.x >= other.x
except AttributeError:
return NotImplemented
if PY_VERSION_2:
self.assertTotallyOrdered(
[None], # None should come before everything else.
[1], # Integers sort earlier.
[A(1, 'a')],
[A(2, 'b')], # 2 is after 1.
[A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
[A(4, 'z')],
['foo']) # Strings sort last.
else:
# Python 3 does not define ordering across different types.
self.assertTotallyOrdered(
[A(1, 'a')],
[A(2, 'b')], # 2 is after 1.
[A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
[A(4, 'z')])
# Invalid.
self.assertRaises(AssertionError, self.assertTotallyOrdered, [2], [1])
self.assertRaises(AssertionError, self.assertTotallyOrdered, [2], [1], [3])
self.assertRaises(AssertionError, self.assertTotallyOrdered, [1, 2])
def testShortDescriptionWithoutDocstring(self):
self.assertEquals(
self.shortDescription(),
('testShortDescriptionWithoutDocstring '
'(%s.GoogleTestBaseUnitTest)' % __name__))
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEquals(
self.shortDescription(),
('testShortDescriptionWithOneLineDocstring '
'(%s.GoogleTestBaseUnitTest)\n'
'Tests shortDescription() for a method with a docstring.' % __name__))
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
used in the short description, no matter how long the
whole thing is.
"""
self.assertEquals(
self.shortDescription(),
('testShortDescriptionWithMultiLineDocstring '
'(%s.GoogleTestBaseUnitTest)\n'
'Tests shortDescription() for a method with a longer docstring.'
% __name__))
def testRecordedProperties(self):
"""Tests that a test can record a property and then retrieve it."""
self.recordProperty('test_property', 'test_value')
self.assertEquals(self.getRecordedProperties(),
{'test_property': 'test_value'})
def testAssertUrlEqualSame(self):
self.assertUrlEqual('http://a', 'http://a')
self.assertUrlEqual('http://a/path/test', 'http://a/path/test')
self.assertUrlEqual('#fragment', '#fragment')
self.assertUrlEqual('http://a/?q=1', 'http://a/?q=1')
self.assertUrlEqual('http://a/?q=1&v=5', 'http://a/?v=5&q=1')
self.assertUrlEqual('/logs?v=1&a=2&t=labels&f=path%3A%22foo%22',
'/logs?a=2&f=path%3A%22foo%22&v=1&t=labels')
self.assertUrlEqual('http://a/path;p1', 'http://a/path;p1')
self.assertUrlEqual('http://a/path;p2;p3;p1', 'http://a/path;p1;p2;p3')
self.assertUrlEqual('sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15',
'sip:alice@atlanta.com;ttl=15;maddr=239.255.255.1')
self.assertUrlEqual('http://nyan/cat?p=1&b=', 'http://nyan/cat?b=&p=1')
def testAssertUrlEqualDifferent(self):
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a', 'http://b')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a/x', 'http://a:8080/x')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a/x', 'http://a/y')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a/?q=2', 'http://a/?q=1')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a/?q=1&v=5', 'http://a/?v=2&q=1')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a', 'sip://b')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a#g', 'sip://a#f')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://a/path;p1;p3;p1', 'http://a/path;p1;p2;p3')
self.assertRaises(AssertionError, self.assertUrlEqual,
'http://nyan/cat?p=1&b=', 'http://nyan/cat?p=1')
def testSameStructure_same(self):
self.assertSameStructure(0, 0)
self.assertSameStructure(1, 1)
self.assertSameStructure('', '')
self.assertSameStructure('hello', 'hello', msg='This Should not fail')
self.assertSameStructure(set(), set())
self.assertSameStructure(set([1, 2]), set([1, 2]))
self.assertSameStructure([], [])
self.assertSameStructure(['a'], ['a'])
self.assertSameStructure({}, {})
self.assertSameStructure({'one': 1}, {'one': 1})
# int and long should always be treated as the same type.
self.assertSameStructure({3L: 3}, {3: 3L})
def testSameStructure_different(self):
# Different type
self.assertRaisesWithRegexpMatch(
AssertionError,
r"a is a <(type|class) 'int'> but b is a <(type|class) 'str'>",
self.assertSameStructure, 0, 'hello')
self.assertRaisesWithRegexpMatch(
AssertionError,
r"a is a <(type|class) 'int'> but b is a <(type|class) 'list'>",
self.assertSameStructure, 0, [])
self.assertRaisesWithRegexpMatch(
AssertionError,
r"a is a <(type|class) 'int'> but b is a <(type|class) 'float'>",
self.assertSameStructure, 2, 2.0)
# Different scalar values
self.assertRaisesWithLiteralMatch(
AssertionError, 'a is 0 but b is 1',
self.assertSameStructure, 0, 1)
self.assertRaisesWithLiteralMatch(
AssertionError, "a is 'hello' but b is 'goodbye': This was expected",
self.assertSameStructure, 'hello', 'goodbye', msg='This was expected')
# Different sets are treated without structure
self.assertRaisesWithRegexpMatch(
AssertionError, r'AA is (set\(\[1\]\)|\{1\}) but BB is set\((\[\])?\)',
self.assertSameStructure, set([1]), set(), aname='AA', bname='BB')
# Different lists
self.assertRaisesWithLiteralMatch(
AssertionError, 'a has [2] but b does not',
self.assertSameStructure, ['x', 'y', 'z'], ['x', 'y'])
self.assertRaisesWithLiteralMatch(
AssertionError, 'a lacks [2] but b has it',
self.assertSameStructure, ['x', 'y'], ['x', 'y', 'z'])
self.assertRaisesWithLiteralMatch(
AssertionError, "a[2] is 'z' but b[2] is 'Z'",
self.assertSameStructure, ['x', 'y', 'z'], ['x', 'y', 'Z'])
# Different dicts
self.assertRaisesWithLiteralMatch(
AssertionError, "a has ['two'] but b does not",
self.assertSameStructure, {'one': 1, 'two': 2}, {'one': 1})
self.assertRaisesWithLiteralMatch(
AssertionError, "a lacks ['two'] but b has it",
self.assertSameStructure, {'one': 1}, {'one': 1, 'two': 2})
self.assertRaisesWithLiteralMatch(
AssertionError, "a['two'] is 2 but b['two'] is 3",
self.assertSameStructure, {'one': 1, 'two': 2}, {'one': 1, 'two': 3})
# Deep key generation
self.assertRaisesWithLiteralMatch(
AssertionError,
"a[0][0]['x']['y']['z'][0] is 1 but b[0][0]['x']['y']['z'][0] is 2",
self.assertSameStructure,
[[{'x': {'y': {'z': [1]}}}]], [[{'x': {'y': {'z': [2]}}}]])
# Multiple problems
self.assertRaisesWithLiteralMatch(
AssertionError,
'a[0] is 1 but b[0] is 3; a[1] is 2 but b[1] is 4',
self.assertSameStructure, [1, 2], [3, 4])
self.assertRaisesWithRegexpMatch(
AssertionError,
re.compile(r"^a\[0] is 'a' but b\[0] is 'A'; .*"
r"a\[18] is 's' but b\[18] is 'S'; \.\.\.$"),
self.assertSameStructure,
list(string.ascii_lowercase), list(string.ascii_uppercase))
def testAssertJsonEqualSame(self):
self.assertJsonEqual('{"success": true}', '{"success": true}')
self.assertJsonEqual('{"success": true}', '{"success":true}')
self.assertJsonEqual('true', 'true')
self.assertJsonEqual('null', 'null')
self.assertJsonEqual('false', 'false')
self.assertJsonEqual('34', '34')
self.assertJsonEqual('[1, 2, 3]', '[1,2,3]', msg='please PASS')
self.assertJsonEqual('{"sequence": [1, 2, 3], "float": 23.42}',
'{"float": 23.42, "sequence": [1,2,3]}')
self.assertJsonEqual('{"nest": {"spam": "eggs"}, "float": 23.42}',
'{"float": 23.42, "nest": {"spam":"eggs"}}')
def testAssertJsonEqualDifferent(self):
with self.assertRaises(AssertionError):
self.assertJsonEqual('{"success": true}', '{"success": false}')
with self.assertRaises(AssertionError):
self.assertJsonEqual('{"success": false}', '{"Success": false}')
with self.assertRaises(AssertionError):
self.assertJsonEqual('false', 'true')
with self.assertRaises(AssertionError) as error_context:
self.assertJsonEqual('null', '0', msg='I demand FAILURE')
self.assertIn('I demand FAILURE', error_context.exception.args[0])
self.assertIn('None', error_context.exception.args[0])
with self.assertRaises(AssertionError):
self.assertJsonEqual('[1, 0, 3]', '[1,2,3]')
with self.assertRaises(AssertionError):
self.assertJsonEqual('{"sequence": [1, 2, 3], "float": 23.42}',
'{"float": 23.42, "sequence": [1,0,3]}')
with self.assertRaises(AssertionError):
self.assertJsonEqual('{"nest": {"spam": "eggs"}, "float": 23.42}',
'{"float": 23.42, "nest": {"Spam":"beans"}}')
def testAssertJsonEqualBadJson(self):
with self.assertRaises(ValueError) as error_context:
self.assertJsonEqual("alhg'2;#", '{"a": true}')
self.assertIn('first', error_context.exception.args[0])
self.assertIn('alhg', error_context.exception.args[0])
with self.assertRaises(ValueError) as error_context:
self.assertJsonEqual('{"a": true}', "alhg'2;#")
self.assertIn('second', error_context.exception.args[0])
self.assertIn('alhg', error_context.exception.args[0])
with self.assertRaises(ValueError) as error_context:
self.assertJsonEqual('', '')
class GetCommandStderrTestCase(basetest.TestCase):
def setUp(self):
self.original_environ = os.environ.copy()
def tearDown(self):
os.environ = self.original_environ
def testReturnStatus(self):
expected = 255
observed = (
basetest.GetCommandStderr(
['/usr/bin/perl', '-e', 'die "FAIL";'],
None)[0])
self.assertEqual(expected, observed)
# TODO(dborowitz): Tests for more functionality that do not deal with
# PYTHON_RUNFILES.
class EqualityAssertionTest(basetest.TestCase):
"""This test verifies that basetest.failIfEqual actually tests __ne__.
If a user class implements __eq__, unittest.failUnlessEqual will call it
via first == second. However, unittest.failIfEqual also calls
first == second. This means that while the caller may believe
their __ne__ method is being tested, it is not.
"""
class NeverEqual(object):
"""Objects of this class behave like NaNs."""
def __eq__(self, unused_other):
return False
def __ne__(self, unused_other):
return False
class AllSame(object):
"""All objects of this class compare as equal."""
def __eq__(self, unused_other):
return True
def __ne__(self, unused_other):
return False
class EqualityTestsWithEq(object):
"""Performs all equality and inequality tests with __eq__."""
def __init__(self, value):
self._value = value
def __eq__(self, other):
return self._value == other._value
def __ne__(self, other):
return not self.__eq__(other)
class EqualityTestsWithNe(object):
"""Performs all equality and inequality tests with __ne__."""
def __init__(self, value):
self._value = value
def __eq__(self, other):
return not self.__ne__(other)
def __ne__(self, other):
return self._value != other._value
class EqualityTestsWithCmp(object):
def __init__(self, value):
self._value = value
def __cmp__(self, other):
return cmp(self._value, other._value)
class EqualityTestsWithLtEq(object):
def __init__(self, value):
self._value = value
def __eq__(self, other):
return self._value == other._value
def __lt__(self, other):
return self._value < other._value
def testAllComparisonsFail(self):
i1 = self.NeverEqual()
i2 = self.NeverEqual()
self.assertFalse(i1 == i2)
self.assertFalse(i1 != i2)
# Compare two distinct objects
self.assertFalse(i1 is i2)
self.assertRaises(AssertionError, self.assertEqual, i1, i2)
self.assertRaises(AssertionError, self.assertEquals, i1, i2)
self.assertRaises(AssertionError, self.failUnlessEqual, i1, i2)
self.assertRaises(AssertionError, self.assertNotEqual, i1, i2)
self.assertRaises(AssertionError, self.assertNotEquals, i1, i2)
self.assertRaises(AssertionError, self.failIfEqual, i1, i2)
# A NeverEqual object should not compare equal to itself either.
i2 = i1
self.assertTrue(i1 is i2)
self.assertFalse(i1 == i2)
self.assertFalse(i1 != i2)
self.assertRaises(AssertionError, self.assertEqual, i1, i2)
self.assertRaises(AssertionError, self.assertEquals, i1, i2)
self.assertRaises(AssertionError, self.failUnlessEqual, i1, i2)
self.assertRaises(AssertionError, self.assertNotEqual, i1, i2)
self.assertRaises(AssertionError, self.assertNotEquals, i1, i2)
self.assertRaises(AssertionError, self.failIfEqual, i1, i2)
def testAllComparisonsSucceed(self):
a = self.AllSame()
b = self.AllSame()
self.assertFalse(a is b)
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertEqual(a, b)
self.assertEquals(a, b)
self.failUnlessEqual(a, b)
self.assertRaises(AssertionError, self.assertNotEqual, a, b)
self.assertRaises(AssertionError, self.assertNotEquals, a, b)
self.assertRaises(AssertionError, self.failIfEqual, a, b)
def _PerformAppleAppleOrangeChecks(self, same_a, same_b, different):
"""Perform consistency checks with two apples and an orange.
The two apples should always compare as being the same (and inequality
checks should fail). The orange should always compare as being different
to each of the apples.
Args:
same_a: the first apple
same_b: the second apple
different: the orange
"""
self.assertTrue(same_a == same_b)
self.assertFalse(same_a != same_b)
self.assertEqual(same_a, same_b)
self.assertEquals(same_a, same_b)
self.failUnlessEqual(same_a, same_b)
if PY_VERSION_2:
# Python 3 removes the global cmp function
self.assertEqual(0, cmp(same_a, same_b))
self.assertFalse(same_a == different)
self.assertTrue(same_a != different)
self.assertNotEqual(same_a, different)
self.assertNotEquals(same_a, different)
self.failIfEqual(same_a, different)
if PY_VERSION_2:
self.assertNotEqual(0, cmp(same_a, different))
self.assertFalse(same_b == different)
self.assertTrue(same_b != different)
self.assertNotEqual(same_b, different)
self.assertNotEquals(same_b, different)
self.failIfEqual(same_b, different)
if PY_VERSION_2:
self.assertNotEqual(0, cmp(same_b, different))
def testComparisonWithEq(self):
same_a = self.EqualityTestsWithEq(42)
same_b = self.EqualityTestsWithEq(42)
different = self.EqualityTestsWithEq(1769)
self._PerformAppleAppleOrangeChecks(same_a, same_b, different)
def testComparisonWithNe(self):
same_a = self.EqualityTestsWithNe(42)
same_b = self.EqualityTestsWithNe(42)
different = self.EqualityTestsWithNe(1769)
self._PerformAppleAppleOrangeChecks(same_a, same_b, different)
def testComparisonWithCmpOrLtEq(self):
if PY_VERSION_2:
# In Python 3, the __cmp__ method is no longer special.
cmp_or_lteq_class = self.EqualityTestsWithCmp
else:
cmp_or_lteq_class = self.EqualityTestsWithLtEq
same_a = cmp_or_lteq_class(42)
same_b = cmp_or_lteq_class(42)
different = cmp_or_lteq_class(1769)
self._PerformAppleAppleOrangeChecks(same_a, same_b, different)
class AssertSequenceStartsWithTest(basetest.TestCase):
def setUp(self):
self.a = [5, 'foo', {'c': 'd'}, None]
def testEmptySequenceStartsWithEmptyPrefix(self):
self.assertSequenceStartsWith([], ())
def testSequencePrefixIsAnEmptyList(self):
self.assertSequenceStartsWith([[]], ([], 'foo'))
def testRaiseIfEmptyPrefixWithNonEmptyWhole(self):
self.assertRaisesWithRegexpMatch(
AssertionError,
'Prefix length is 0 but whole length is %d: %s' % (
len(self.a), '\[5, \'foo\', \{\'c\': \'d\'\}, None\]'),
self.assertSequenceStartsWith, [], self.a)
def testSingleElementPrefix(self):
self.assertSequenceStartsWith([5], self.a)
def testTwoElementPrefix(self):
self.assertSequenceStartsWith((5, 'foo'), self.a)
def testPrefixIsFullSequence(self):
self.assertSequenceStartsWith([5, 'foo', {'c': 'd'}, None], self.a)
def testStringPrefix(self):
self.assertSequenceStartsWith('abc', 'abc123')
def testConvertNonSequencePrefixToSequenceAndTryAgain(self):
self.assertSequenceStartsWith(5, self.a)
def testWholeNotASequence(self):
msg = ('For whole: len\(5\) is not supported, it appears to be type: '
'<(type|class) \'int\'>')
self.assertRaisesWithRegexpMatch(AssertionError, msg,
self.assertSequenceStartsWith, self.a, 5)
def testRaiseIfSequenceDoesNotStartWithPrefix(self):
msg = ('prefix: \[\'foo\', \{\'c\': \'d\'\}\] not found at start of whole: '
'\[5, \'foo\', \{\'c\': \'d\'\}, None\].')
self.assertRaisesWithRegexpMatch(
AssertionError, msg, self.assertSequenceStartsWith, ['foo', {'c': 'd'}],
self.a)
def testRaiseIfTypesAreNotSupported(self):
self.assertRaisesWithRegexpMatch(
TypeError, 'unhashable type', self.assertSequenceStartsWith,
{'a': 1, 2: 'b'}, {'a': 1, 2: 'b', 'c': '3'})
class InitNotNecessaryForAssertsTest(basetest.TestCase):
"""TestCase assertions should work even if __init__ wasn't correctly called.
This is a hack, see comment in
basetest.TestCase._getAssertEqualityFunc. We know that not calling
__init__ of a superclass is a bad thing, but people keep doing them,
and this (even if a little bit dirty) saves them from shooting
themselves in the foot.
"""
def testSubclass(self):
class Subclass(basetest.TestCase):
def __init__(self): # pylint: disable=super-init-not-called
pass
Subclass().assertEquals({}, {})
def testMultipleInheritance(self):
class Foo(object):
def __init__(self, *args, **kwargs):
pass
class Subclass(Foo, basetest.TestCase):
pass
Subclass().assertEquals({}, {})
if __name__ == '__main__':
basetest.main()
|
{
"content_hash": "e6be66f55442aedcc2b1c10e30bc603a",
"timestamp": "",
"source": "github",
"line_count": 1570,
"max_line_length": 80,
"avg_line_length": 38.18789808917197,
"alnum_prop": 0.6482361771328496,
"repo_name": "LeslieW/apputils",
"id": "426eed7325681994653e4faa46ead974449b91f1",
"size": "60575",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/basetest_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "291137"
},
{
"name": "Shell",
"bytes": "19708"
}
],
"symlink_target": ""
}
|
import unittest
from rdlmpy import RDLMClient
from rdlmpy import RDLMLockWaitExceededException, RDLMLockDeletedException, RDLMServerException
from httpretty import HTTPretty, httprettified
import base64
import os
class TestClient(unittest.TestCase):
client = None
server = "localhost"
port = 8888
baseurl = None
resource = "foo"
def setUp(self):
self.baseurl = "http://%s:%i" % (self.server, self.port)
self.client = RDLMClient(server=self.server, port=self.port)
self.assertFalse(self.client is None)
def tearDown(self):
pass
def _make_lock_object(self):
url = "%s/locks/%s" % (self.baseurl, self.resource)
lock_url = "%s/ff14608f6ab342f0bb2a86d551d42a8c" % url
HTTPretty.register_uri(HTTPretty.POST, url, status=201, location=lock_url)
r = self.client.lock_acquire(self.resource)
self.assertFalse(r is None)
return (r, lock_url)
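# The helper above stubs the POST /locks/<resource> endpoint to answer
# 201 Created with a Location header; lock_acquire() is expected to hand
# back that Location URL, which the tests below compare against lock_url.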
@httprettified
def test_acquire(self):
(lock_u, lock_url) = self._make_lock_object()
self.assertEqual(lock_u, lock_url)
@httprettified
def _test_acquire_exception(self, status_code, exception):
url = "%s/locks/%s" % (self.baseurl, self.resource)
if status_code == 201:
lock_url = "%s/ff14608f6ab342f0bb2a86d551d42a8c" % url
HTTPretty.register_uri(HTTPretty.POST, url, status=status_code, location=lock_url)
else:
HTTPretty.register_uri(HTTPretty.POST, url, status=status_code)
try:
self.client.lock_acquire(self.resource)
raise Exception("no exception raised")
except exception:
pass
@httprettified
def test_acquire_408(self):
self._test_acquire_exception(408, RDLMLockWaitExceededException)
@httprettified
def test_acquire_500(self):
self._test_acquire_exception(500, RDLMServerException)
@httprettified
def test_acquire_409(self):
self._test_acquire_exception(409, RDLMLockDeletedException)
@httprettified
def test_release(self):
(lock_u, lock_url) = self._make_lock_object()
self.assertEqual(lock_u, lock_url)
HTTPretty.register_uri(HTTPretty.DELETE, lock_url, status=204)
f = self.client.lock_release(lock_url)
self.assertTrue(f)
@httprettified
def test_delete_all_resources(self):
HTTPretty.register_uri(HTTPretty.DELETE, "%s/resources" % self.baseurl, status=401)
f = self.client.resource_delete_all()
self.assertFalse(f)
self.assertFalse('Authorization' in HTTPretty.last_request.headers)
HTTPretty.register_uri(HTTPretty.DELETE, "%s/resources" % self.baseurl, status=204)
f = self.client.resource_delete_all("foo", "bar")
self.assertTrue(f)
self.assertTrue('Authorization' in HTTPretty.last_request.headers)
b64 = base64.standard_b64encode(b"foo:bar")
expected = b"Basic " + b64
self.assertEqual(HTTPretty.last_request.headers['Authorization'].encode('ascii'), expected)
@httprettified
def test_delete_resource(self):
HTTPretty.register_uri(HTTPretty.DELETE, "%s/resources/foo" % self.baseurl, status=401)
f = self.client.resource_delete("foo")
self.assertFalse(f)
self.assertFalse('Authorization' in HTTPretty.last_request.headers)
HTTPretty.register_uri(HTTPretty.DELETE, "%s/resources/foo" % self.baseurl, status=204)
f = self.client.resource_delete("foo", username="foo", password="bar")
self.assertTrue(f)
self.assertTrue('Authorization' in HTTPretty.last_request.headers)
b64 = base64.standard_b64encode(b"foo:bar")
expected = b"Basic " + b64
self.assertEqual(HTTPretty.last_request.headers['Authorization'].encode('ascii'), expected)
@httprettified
def test_get(self):
lock_url = "%s/locks/foo/94e4458bad8248828213275ae0b17eae" % self.baseurl
HTTPretty.register_uri(HTTPretty.GET, lock_url, status=404)
f = self.client.lock_get(lock_url)
self.assertTrue(f is None)
json_file = os.path.join(os.path.dirname(__file__), "lock1.json")
with open(json_file, "r") as f:
body = f.read()
HTTPretty.register_uri(HTTPretty.GET, lock_url, status=200, body=body)
f = self.client.lock_get(lock_url)
self.assertFalse(f is None)
self.assertTrue(f.active)
self.assertEqual(f.title, "test title")
self.assertEqual(f.uid, "94e4458bad8248828213275ae0b17eae")
self.assertEqual(f.lifetime, 300)
self.assertEqual(f.wait, 10)
self.assertEqual(f.active_since.isoformat(), "2013-03-02T22:00:05")
self.assertEqual(f.active_expires.isoformat(), "2013-03-02T22:05:05")
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "76933e9d838147ef686a0847955e1f16",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 99,
"avg_line_length": 40.333333333333336,
"alnum_prop": 0.6584710743801653,
"repo_name": "thefab/rdlm-py",
"id": "73994a752172724c3479b6b3416dce7d7d6f0f3e",
"size": "4840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28274"
}
],
"symlink_target": ""
}
|
"""Tests for the heuristic_payoff_table library."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.egt import heuristic_payoff_table
from open_spiel.python.egt import utils
import pyspiel
class ModuleLevelTest(absltest.TestCase):
def test__multinomial_coefficients(self):
distributions = np.asarray([
[2, 0],
[1, 1],
[1, 0],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 2., 1.], coefficients)
distributions = np.asarray([
[3, 0],
[2, 1],
[1, 2],
[0, 3],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 3., 3., 1.], coefficients)
distributions = np.asarray([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 1., 1., 2., 2., 2.], coefficients)
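# The coefficients asserted above are multinomial coefficients: a strategy
# profile (n_1, ..., n_k) over n = n_1 + ... + n_k players carries weight
# n! / (n_1! * ... * n_k!). For example, [2, 1] gives 3! / (2! * 1!) = 3,
# matching the inner entries of the [1., 3., 3., 1.] row, and [1, 1, 0]
# gives 2! / (1! * 1! * 0!) = 2, matching the 3-strategy rows checked above.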
class PayoffTableTest(parameterized.TestCase):
@parameterized.parameters(
(5, 2),
(2, 2),
)
def test_construction(self, num_players, num_strategies):
logging.info("Testing payoff table construction.")
table = heuristic_payoff_table.PayoffTable(num_players, num_strategies)
num_rows = utils.n_choose_k(num_players + num_strategies - 1, num_players)
distributions = np.array(
list(utils.distribute(num_players, num_strategies)))
payoffs = np.full([int(num_rows), num_strategies], np.nan)
np.testing.assert_array_equal(
np.concatenate([distributions, payoffs], axis=1), table())
def test_from_heuristic_payoff_table(self):
team_compositions = np.asarray([
[2, 0],
[1, 1],
[0, 2],
])
payoffs = np.asarray([
[1, 2],
[3, 4],
[5, 6],
])
hpt = np.hstack([team_compositions, payoffs])
table = heuristic_payoff_table.from_heuristic_payoff_table(hpt)
np.testing.assert_array_equal(team_compositions, table._distributions)
np.testing.assert_array_equal(payoffs, table._payoffs)
self.assertEqual(3, table.num_rows)
distributions = np.asarray([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
])
shape = distributions.shape
payoffs = np.reshape(np.arange(np.prod(shape)), shape)
hpt = np.hstack([distributions, payoffs])
table = heuristic_payoff_table.from_heuristic_payoff_table(hpt)
np.testing.assert_array_equal(distributions, table._distributions)
np.testing.assert_array_equal(payoffs, table._payoffs)
self.assertEqual(distributions.shape[0], table.num_rows)
@parameterized.parameters(("matrix_rps",))
def test_from_matrix_game(self, game):
game = pyspiel.load_matrix_game(game)
payoff_tables = utils.game_payoffs_array(game)
logging.info("Testing payoff table construction for matrix game.")
table = heuristic_payoff_table.from_matrix_game(payoff_tables[0])
print(table())
@parameterized.parameters((np.array([0.7, 0.2, 0.1]),))
def test_expected_payoff(self, strategy):
logging.info("Testing expected payoff for matrix game.")
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
table = heuristic_payoff_table.from_matrix_game(payoff_tables[0])
expected_payoff = table.expected_payoff(strategy)
print(expected_payoff)
assert len(expected_payoff) == table._num_strategies
def test_from_elo_scores(self):
elo_scores = [800, 400, 400]
elo_1 = 10**(800 / 400)
elo_2 = 10**(400 / 400) # This is also the associated value for player 3.
expected = np.asarray([
[2, 0, 0, 1 / 2, 0, 0],
[0, 2, 0, 0, 1 / 2, 0],
[0, 0, 2, 0, 0, 1 / 2],
[1, 1, 0, elo_1 / (elo_1 + elo_2), elo_2 / (elo_1 + elo_2), 0],
[1, 0, 1, elo_1 / (elo_1 + elo_2), 0, elo_2 / (elo_1 + elo_2)],
[0, 1, 1, 0, 1 / 2, 1 / 2],
])
htp = heuristic_payoff_table.from_elo_scores(elo_scores)
np.testing.assert_array_almost_equal(
utils.sort_rows_lexicographically(expected),
utils.sort_rows_lexicographically(htp()),
verbose=True)
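# The expected entries above follow the standard Elo win probability,
# p(i beats j) = 10**(r_i / 400) / (10**(r_i / 400) + 10**(r_j / 400)); with
# r_1 = 800 and r_2 = r_3 = 400 this gives elo_1 = 100 and elo_2 = 10, so
# player 1 wins with probability 100 / 110 ~= 0.909 against either opponent,
# while equally rated players split the match at 1 / 2, as asserted above.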
if __name__ == "__main__":
absltest.main()
|
{
"content_hash": "99803fcedb29e5086a8552846783c922",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 31.985714285714284,
"alnum_prop": 0.6185797230906654,
"repo_name": "deepmind/open_spiel",
"id": "48e84b07fd01e370712de977f7818cd2e7258a5a",
"size": "5073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_spiel/python/egt/heuristic_payoff_table_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6640"
},
{
"name": "C++",
"bytes": "4649139"
},
{
"name": "CMake",
"bytes": "78467"
},
{
"name": "Go",
"bytes": "18010"
},
{
"name": "Julia",
"bytes": "16727"
},
{
"name": "Jupyter Notebook",
"bytes": "148663"
},
{
"name": "Python",
"bytes": "2823600"
},
{
"name": "Rust",
"bytes": "18562"
},
{
"name": "Shell",
"bytes": "51087"
}
],
"symlink_target": ""
}
|
from gdcdictionary import gdcdictionary
class GDCGraphValidator(object):
"""
Validator that validates entities' relationship with existing nodes in
database.
"""
def __init__(self):
self.schemas = gdcdictionary
self.required_validators = {
"links_validator": GDCLinksValidator(),
"uniqueKeys_validator": GDCUniqueKeysValidator(),
}
self.optional_validators = {}
def record_errors(self, graph, entities):
for validator in self.required_validators.values():
validator.validate(entities, graph)
for entity in entities:
schema = self.schemas.schema[entity.node.label]
validators = schema.get("validators")
if validators:
for validator_name in validators:
self.optional_validators[validator_name].validate()
class GDCLinksValidator(object):
def validate(self, entities, graph=None):
for entity in entities:
for link in gdcdictionary.schema[entity.node.label]["links"]:
if "name" in link:
self.validate_edge(link, entity)
elif "subgroup" in link:
self.validate_edge_group(link, entity)
def validate_edge_group(self, schema, entity):
submitted_links = []
schema_links = []
num_of_edges = 0
for group in schema["subgroup"]:
if "subgroup" in schema["subgroup"]:
# nested subgroup
result = self.validate_edge_group(group, entity)
if "name" in group:
result = self.validate_edge(group, entity)
if result["length"] > 0:
submitted_links.append(result)
num_of_edges += result["length"]
schema_links.append(result["name"])
if schema.get("required") is True and len(submitted_links) == 0:
names = ", ".join(schema_links[:-2] + [" or ".join(schema_links[-2:])])
entity.record_error(
"Entity is missing a required link to {}".format(names),
keys=schema_links,
)
if schema.get("exclusive") is True and len(submitted_links) > 1:
names = ", ".join(schema_links[:-2] + [" and ".join(schema_links[-2:])])
entity.record_error(
"Links to {} are exclusive. More than one was provided: {}".format(
schema_links, entity.node.edges_out
),
keys=schema_links,
)
for edge in entity.node.edges_out:
entity.record_error("{}".format(edge.dst.submitter_id))
result = {"length": num_of_edges, "name": ", ".join(schema_links)}
def validate_edge(self, link_sub_schema, entity):
association = link_sub_schema["name"]
node = entity.node
targets = node[association]
result = {"length": len(targets), "name": association}
if len(targets) > 0:
multi = link_sub_schema["multiplicity"]
if multi in ["many_to_one", "one_to_one"]:
if len(targets) > 1:
entity.record_error(
"'{}' link has to be {}".format(association, multi),
keys=[association],
)
if multi in ["one_to_many", "one_to_one"]:
for target in targets:
if len(target[link_sub_schema["backref"]]) > 1:
entity.record_error(
"'{}' link has to be {}, target node {} already has {}".format(
association,
multi,
target.label,
link_sub_schema["backref"],
),
keys=[association],
)
if multi == "many_to_many":
pass
else:
if link_sub_schema.get("required") is True:
entity.record_error(
"Entity is missing required link to {}".format(association),
keys=[association],
)
return result
class GDCUniqueKeysValidator(object):
def validate(self, entities, graph=None):
for entity in entities:
schema = gdcdictionary.schema[entity.node.label]
node = entity.node
for keys in schema["uniqueKeys"]:
props = {}
if keys == ["id"]:
continue
for key in keys:
prop = schema["properties"][key].get("systemAlias")
if prop:
props[prop] = node[prop]
else:
props[key] = node[key]
if graph.nodes().props(props).count() > 1:
entity.record_error(
"{} with {} already exists in the GDC".format(
node.label, props
),
keys=list(props.keys()),
)
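# Illustrative usage sketch (driver names below are assumptions, not part of
# this module):
#
#   validator = GDCGraphValidator()
#   validator.record_errors(graph_session, submitted_entities)
#   # Each entity collects messages through the record_error() calls above,
#   # covering link multiplicity, required/exclusive links and unique keys.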
|
{
"content_hash": "d7afc8b378a946cda400a2c040da51aa",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 91,
"avg_line_length": 37.65942028985507,
"alnum_prop": 0.4906676928997499,
"repo_name": "NCI-GDC/gdcdatamodel",
"id": "093e220e40bb7b729c8476738997f940f1b41431",
"size": "5197",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gdcdatamodel/validators/graph_validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "153557"
},
{
"name": "Shell",
"bytes": "207"
}
],
"symlink_target": ""
}
|
import random
import math
from datetime import timedelta
from scipy.stats import norm
class SpikeRequestGenerator:
def __init__(self, constant_rpm, quantum_seconds, max_error,
start_datetime):
self.quantum_seconds = quantum_seconds
self.max_error = max_error
self.constant_rpq = math.ceil(constant_rpm * (quantum_seconds / 60))
self.current_timestamp = start_datetime
def get_new_quantum_requests(self):
self.current_timestamp += timedelta(seconds=self.quantum_seconds)
rpq = self.constant_rpq + random.randint(
-self.max_error, self.max_error
)
rpq += norm.pdf(self.current_timestamp.minute - 30, scale=10) * 20000
rpq = int(rpq)
request_datetimes = [
self.current_timestamp for _ in range(rpq)
]
return request_datetimes
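# Illustrative usage sketch (the driver loop below is an assumption, not part
# of the original module):
#
#   from datetime import datetime
#   gen = SpikeRequestGenerator(constant_rpm=600, quantum_seconds=5,
#                               max_error=10,
#                               start_datetime=datetime(2020, 1, 1, 12, 0))
#   for _ in range(12 * 60):  # one simulated hour in 5-second quanta
#       batch = gen.get_new_quantum_requests()
#       # len(batch) hovers near 50 +/- 10 and spikes around minute 30,
#       # where the norm.pdf(minute - 30, scale=10) bump peaks.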
|
{
"content_hash": "e4dc36743c51c32f7006ba4951a1457a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.6384439359267735,
"repo_name": "swarmer/autoscaler",
"id": "dbe8f9c7d5899313ef44fe72ecf8d1170ced26bf",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoscaler/simulation/request_generators/spike.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8489"
}
],
"symlink_target": ""
}
|
"""Support for Dominos Pizza ordering."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components import http
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# The domain of your component. Should be equal to the name of your component.
DOMAIN = "dominos"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ATTR_COUNTRY = "country_code"
ATTR_FIRST_NAME = "first_name"
ATTR_LAST_NAME = "last_name"
ATTR_EMAIL = "email"
ATTR_PHONE = "phone"
ATTR_ADDRESS = "address"
ATTR_ORDERS = "orders"
ATTR_SHOW_MENU = "show_menu"
ATTR_ORDER_ENTITY = "order_entity_id"
ATTR_ORDER_NAME = "name"
ATTR_ORDER_CODES = "codes"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
MIN_TIME_BETWEEN_STORE_UPDATES = timedelta(minutes=3330)
_ORDERS_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ORDER_NAME): cv.string,
vol.Required(ATTR_ORDER_CODES): vol.All(cv.ensure_list, [cv.string]),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(ATTR_COUNTRY): cv.string,
vol.Required(ATTR_FIRST_NAME): cv.string,
vol.Required(ATTR_LAST_NAME): cv.string,
vol.Required(ATTR_EMAIL): cv.string,
vol.Required(ATTR_PHONE): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Optional(ATTR_SHOW_MENU): cv.boolean,
vol.Optional(ATTR_ORDERS, default=[]): vol.All(
cv.ensure_list, [_ORDERS_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
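# A hedged example of a configuration mapping that CONFIG_SCHEMA would accept
# (all values below are illustrative placeholders, including the product code):
#     {
#         "dominos": {
#             "country_code": "us",
#             "first_name": "Jane",
#             "last_name": "Doe",
#             "email": "jane@example.com",
#             "phone": "5555555555",
#             "address": "123 Main St, Springfield, 12345",
#             "show_menu": True,
#             "orders": [{"name": "weeknight pizza", "codes": ["P_XYZ"]}],
#         }
#     }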
def setup(hass, config):
"""Set up is called when Home Assistant is loading our component."""
dominos = Dominos(hass, config)
component = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[DOMAIN] = {}
entities = []
conf = config[DOMAIN]
hass.services.register(DOMAIN, "order", dominos.handle_order)
if conf.get(ATTR_SHOW_MENU):
hass.http.register_view(DominosProductListView(dominos))
for order_info in conf.get(ATTR_ORDERS):
order = DominosOrder(order_info, dominos)
entities.append(order)
    if entities:
        component.add_entities(entities)
    # Keep a reference to the order entities so the 'order' service handler
    # in Dominos.handle_order can look them up later.
    hass.data[DOMAIN]["entities"] = entities
    # Return boolean to indicate that initialization was successful.
return True
class Dominos:
"""Main Dominos service."""
def __init__(self, hass, config):
"""Set up main service."""
conf = config[DOMAIN]
from pizzapi import Address, Customer
from pizzapi.address import StoreException
self.hass = hass
self.customer = Customer(
conf.get(ATTR_FIRST_NAME),
conf.get(ATTR_LAST_NAME),
conf.get(ATTR_EMAIL),
conf.get(ATTR_PHONE),
conf.get(ATTR_ADDRESS),
)
self.address = Address(
*self.customer.address.split(","), country=conf.get(ATTR_COUNTRY)
)
self.country = conf.get(ATTR_COUNTRY)
try:
self.closest_store = self.address.closest_store()
except StoreException:
self.closest_store = None
def handle_order(self, call):
"""Handle ordering pizza."""
entity_ids = call.data.get(ATTR_ORDER_ENTITY, None)
target_orders = [
order
for order in self.hass.data[DOMAIN]["entities"]
if order.entity_id in entity_ids
]
for order in target_orders:
order.place()
@Throttle(MIN_TIME_BETWEEN_STORE_UPDATES)
def update_closest_store(self):
"""Update the shared closest store (if open)."""
from pizzapi.address import StoreException
try:
self.closest_store = self.address.closest_store()
return True
except StoreException:
self.closest_store = None
return False
def get_menu(self):
"""Return the products from the closest stores menu."""
self.update_closest_store()
if self.closest_store is None:
_LOGGER.warning("Cannot get menu. Store may be closed")
return []
menu = self.closest_store.get_menu()
product_entries = []
for product in menu.products:
item = {}
if isinstance(product.menu_data["Variants"], list):
variants = ", ".join(product.menu_data["Variants"])
else:
variants = product.menu_data["Variants"]
item["name"] = product.name
item["variants"] = variants
product_entries.append(item)
return product_entries
class DominosProductListView(http.HomeAssistantView):
"""View to retrieve product list content."""
url = "/api/dominos"
name = "api:dominos"
def __init__(self, dominos):
"""Initialize suite view."""
self.dominos = dominos
@callback
def get(self, request):
"""Retrieve if API is running."""
return self.json(self.dominos.get_menu())
class DominosOrder(Entity):
"""Represents a Dominos order entity."""
def __init__(self, order_info, dominos):
"""Set up the entity."""
self._name = order_info["name"]
self._product_codes = order_info["codes"]
self._orderable = False
self.dominos = dominos
@property
def name(self):
"""Return the orders name."""
return self._name
@property
def product_codes(self):
"""Return the orders product codes."""
return self._product_codes
@property
def orderable(self):
"""Return the true if orderable."""
return self._orderable
@property
def state(self):
"""Return the state either closed, orderable or unorderable."""
if self.dominos.closest_store is None:
return "closed"
return "orderable" if self._orderable else "unorderable"
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the order state and refreshes the store."""
from pizzapi.address import StoreException
try:
self.dominos.update_closest_store()
except StoreException:
self._orderable = False
return
try:
order = self.order()
order.pay_with()
self._orderable = True
except StoreException:
self._orderable = False
def order(self):
"""Create the order object."""
from pizzapi import Order
from pizzapi.address import StoreException
if self.dominos.closest_store is None:
raise StoreException
order = Order(
self.dominos.closest_store,
self.dominos.customer,
self.dominos.address,
self.dominos.country,
)
for code in self._product_codes:
order.add_item(code)
return order
def place(self):
"""Place the order."""
from pizzapi.address import StoreException
try:
order = self.order()
order.place()
except StoreException:
self._orderable = False
_LOGGER.warning(
"Attempted to order Dominos - Order invalid or store closed"
)
|
{
"content_hash": "f823a44b030f911d68b3723fe0e02c69",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 78,
"avg_line_length": 28.976744186046513,
"alnum_prop": 0.598314606741573,
"repo_name": "fbradyirl/home-assistant",
"id": "59869ed0a977d16b786c7bec005b6d62954d37a0",
"size": "7476",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/dominos/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
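# A minimal usage sketch (hypothetical view class, not part of this module):
#     from django.views.generic import TemplateView
#
#     class DashboardView(LoginRequiredMixin, TemplateView):
#         template_name = 'dashboard.html'
# Listing the mixin first keeps login_required wrapped around dispatch() in the MRO.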
|
{
"content_hash": "06ab6b69afd319387c2f4412273346c5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 72,
"avg_line_length": 37.375,
"alnum_prop": 0.7658862876254181,
"repo_name": "mailme/mailme.io",
"id": "5956543f56067eeba5f8a25c084ce2f33e2907b3",
"size": "299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/mailme/utils/mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39916"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Python",
"bytes": "161761"
},
{
"name": "Ruby",
"bytes": "1212"
},
{
"name": "Shell",
"bytes": "5106"
}
],
"symlink_target": ""
}
|
import pytest
pytestmark = pytest.mark.page('definition_lists.html')
def test_returns_the_matching_elements(browser):
assert list(browser.dts(class_name='current-industry')) == \
[browser.dt(class_name='current-industry')]
def test_returns_the_number_of_dts(browser):
assert len(browser.dts()) == 11
def test_returns_the_dt_at_the_given_index(browser):
assert browser.dts()[0].id == 'experience'
def test_iterates_through_dts_correctly(browser):
count = 0
for index, d in enumerate(browser.dts()):
dt = browser.dt(index=index)
assert d.id == dt.id
assert d.class_name == dt.class_name
count += 1
assert count > 0
|
{
"content_hash": "3a9ff6b711f77e7660c2df8c08cdc0b9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 26.423076923076923,
"alnum_prop": 0.6622998544395924,
"repo_name": "lmtierney/watir-snake",
"id": "38fb786c80475f833f765a6048c9ec66f035d127",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/browser/elements/dts_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "403217"
}
],
"symlink_target": ""
}
|
'''
Wavenet model definition and generation using contrib.layers
- Carl Quillen
'''
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.contrib.framework import arg_scope
def wavenet_block(xpad, x, conditioning, num_outputs,
num_outputs2, rate, is_training, opts, scope):
'''
wavenet_block: many important convolution parameters (reuse, kernel_size
etc.) come from the arg_scope() and are set by wavenet().
Note that convolutions below end up being causal because the
input is padded and padding='VALID'. This causes samples to
drop in the "right" way.
'''
dropout, kernel_size, skip_dimension, histogram_summaries = (
opts.dropout, opts.kernel_size, opts.skip_dimension,
opts.histogram_summaries)
conv_gate = layers.conv2d(
xpad, num_outputs=num_outputs2*2, rate=rate, kernel_size=kernel_size,
activation_fn=None, normalizer_params=None, scope=scope + '/conv_gate')
# Add the conditioning.
conv_gate += layers.conv2d(
conditioning, num_outputs=num_outputs2*2, rate=rate, kernel_size=1,
activation_fn=None, normalizer_params=None, scope=scope + '/cur_cond')
with tf.name_scope(scope + '/activation'):
conv = tf.nn.tanh(conv_gate[:, :, :num_outputs2], name='conv')
gate = tf.nn.sigmoid(conv_gate[:, :, num_outputs2:], name='gate')
with tf.name_scope(scope + '/prod'):
out = conv * gate
if dropout > 0:
out = layers.dropout(out, keep_prob=dropout,
is_training=is_training, scope=scope + '/dropout')
out = layers.conv2d(out, num_outputs=num_outputs, kernel_size=1,
activation_fn=None,
scope=scope + '/output_xform')
with tf.name_scope(scope + '/residual'):
residual = x + out
if skip_dimension != num_outputs: # Upscale for more goodness.
out = layers.conv2d(out, num_outputs=skip_dimension,
kernel_size=1, activation_fn=None,
scope=scope + '/skip_upscale')
if histogram_summaries:
tf.summary.histogram(name=scope + '/conv', values=conv)
tf.summary.histogram(name=scope + '/gate', values=gate)
tf.summary.histogram(name=scope + '/out', values=out)
return residual, out # out gets added to the skip connections.
def padded(new_x, pad, scope, n_chunks, reuse=False,
reverse=False, data_format=None):
'''
Pad new_x, and save the rightmost window for context for the next time
we do the same convolution. This context carries across utterances
during training. Using this trick also allows us to use the same
wavenet() routine in training as well as generation.
reverse=True for reversing the direction of causality.
'''
with tf.variable_scope(scope, reuse=reuse):
        if data_format == 'NCW':
x = tf.get_variable(
'pad', shape=(n_chunks, new_x.get_shape()[1], pad),
collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'padding'],
initializer=tf.constant_initializer(), trainable=False)
if not reverse:
y = tf.concat(values=(x, new_x), axis=2)
x = tf.assign(x, y[:, :, -pad:])
else:
y = tf.concat(values=(new_x, x), axis=2)
x = tf.assign(x, y[:, :, :pad])
else:
x = tf.get_variable(
'pad', shape=(n_chunks, pad, new_x.get_shape()[2]),
collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'padding'],
initializer=tf.constant_initializer(), trainable=False)
if not reverse:
y = tf.concat(values=(x, new_x), axis=1)
x = tf.assign(x, y[:, -pad:, :])
else:
y = tf.concat(values=(new_x, x), axis=1)
x = tf.assign(x, y[:, :pad, :])
with tf.get_default_graph().control_dependencies([x]):
return tf.identity(y)
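# Illustration of the carried context (assuming NWC layout, pad=2, reverse=False):
# call 1: new_x holds samples [0..7]  -> y is [zero, zero, 0..7]; 'pad' now stores [6, 7]
# call 2: new_x holds samples [8..15] -> y is [6, 7, 8..15], so the next causal
# convolution still sees the tail of the previous chunk.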
def wavenet(inputs, opts, is_training=True, reuse=False, pad_reuse=False,
data_format=None, extra_pad_scope=''):
    '''
    The wavenet model definition for training/generation.
    If we use wavenets recursively, we will want separate padding variables
    for each "layer". So we have a separate reuse flag for padded() and an
    additional suffix to append to the padding scope in that case.
    '''
# Parameters for batch normalization
normalizer_params = {}
if opts.batch_norm:
normalizer_params = {
'normalizer_fn': layers.batch_norm,
'normalizer_params': {
'is_training': is_training,
'trainable': False,
'variables_collections': {
'gamma': ['batch_norm']},
'reuse': reuse,
'scale': True, # Update Variance too.
'scope': 'BatchNorm'
}
}
l2reg = None
if 'l2reg' in vars(opts):
l2reg = tf.contrib.layers.l2_regularizer(opts.l2reg)
# unpack inputs.
inputs, user, alignment, lf0 = inputs
with tf.variable_scope('conditioning'):
conditioning = tf.one_hot(alignment, depth=opts.n_phones,
name='align_onehot')
conditioning = tf.reshape(
conditioning, (opts.n_chunks, -1, opts.n_phones*opts.context))
if user is not None:
user = tf.one_hot(user, depth=opts.n_users, name='user_onehot')
conditioning = tf.concat([user, conditioning], axis=2,
name='cat_user')
lf0 = tf.reshape(lf0, (opts.n_chunks, -1, 1))
conditioning = tf.concat((conditioning, lf0), axis=2, name='cat_lf0')
# The arg_scope below will apply to all convolutions, including the ones
# in wavenet_block().
with arg_scope([layers.conv2d], data_format=data_format,
reuse=reuse, padding='VALID', weights_regularizer=l2reg,
**normalizer_params):
if opts.input_kernel_size > 1:
inputs = padded(new_x=inputs, reuse=pad_reuse,
reverse=opts.reverse, pad=opts.input_kernel_size-1,
n_chunks=opts.n_chunks, data_format=data_format,
scope='input_layer/pad'+extra_pad_scope)
x = layers.conv2d(inputs, num_outputs=opts.num_outputs,
kernel_size=opts.input_kernel_size, rate=1,
activation_fn=tf.nn.tanh, scope='input_layer')
skip_connections = 0
for i_block, block_dilations in enumerate(opts.dilations):
for rate in block_dilations:
block_rate = "block_{}/rate_{}".format(i_block, rate)
xpad = padded(
new_x=x, pad=rate*(opts.kernel_size-1),
reuse=pad_reuse, n_chunks=opts.n_chunks,
reverse=opts.reverse, data_format=data_format,
scope=block_rate+"/pad"+extra_pad_scope)
x, skip_connection = wavenet_block(
xpad, x, conditioning, opts.num_outputs,
opts.num_outputs2, rate, is_training,
opts, scope=block_rate)
with tf.name_scope(block_rate+"_skip".format(i_block, rate)):
skip_connections += skip_connection
with tf.name_scope("relu_skip"):
skip_connections = tf.nn.relu(skip_connections)
with arg_scope([layers.conv2d], kernel_size=1, reuse=reuse,
data_format=data_format):
x = layers.conv2d(
skip_connections, num_outputs=opts.skip_dimension, # ?
activation_fn=tf.nn.relu, scope='output_layer1')
mfcc = layers.conv2d(
x, num_outputs=opts.skip_dimension, # ?
activation_fn=tf.nn.relu, scope='mfcc_layer1')
x = layers.conv2d(
x, num_outputs=3,
normalizer_params=None,
activation_fn=None, scope='output_layer2')
mfcc = layers.conv2d(
mfcc, num_outputs=opts.n_mfcc, normalizer_params=None,
activation_fn=None, scope='mfcc_layer2')
with tf.name_scope("unpack_output"):
mu = x[:, :, 0]
r = tf.sin(x[:, :, 1]/opts.r_scale)*opts.r_scale
r = r*r + 1.0
q = 0.999*r*tf.sin(x[:, :, 2])
return mu, r, q, mfcc
def compute_overlap(opts):
total_lost = opts.input_kernel_size-1
for block_dilations in opts.dilations:
for rate in block_dilations:
total_lost += rate*(opts.kernel_size-1)
return total_lost
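# A worked example (illustrative opts, not taken from this repository's configs):
# with input_kernel_size=2, kernel_size=2 and dilations=[[1, 2, 4, 8]],
# compute_overlap() returns (2 - 1) + (1 + 2 + 4 + 8) * (2 - 1) = 16, i.e. the
# number of context samples each chunk borrows from its predecessor via padded().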
|
{
"content_hash": "479742ae82857e013f53323f35da75c1",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 41,
"alnum_prop": 0.574144051299668,
"repo_name": "cbquillen/wavenet_experiment",
"id": "2c1845d2816d55c4cc1aea6b53a63906b08444f6",
"size": "8755",
"binary": false,
"copies": "1",
"ref": "refs/heads/2-sided-laplace",
"path": "wavenet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63121"
},
{
"name": "R",
"bytes": "989"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OccupationalStandard.attachment'
db.add_column(u'admin_occupationalstandard', 'attachment',
self.gf('django.db.models.fields.files.FileField')(default='', max_length=100),
keep_default=False)
# Adding field 'QualificationPack.attachment'
db.add_column(u'admin_qualificationpack', 'attachment',
self.gf('django.db.models.fields.files.FileField')(default='', max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'OccupationalStandard.attachment'
db.delete_column(u'admin_occupationalstandard', 'attachment')
# Deleting field 'QualificationPack.attachment'
db.delete_column(u'admin_qualificationpack', 'attachment')
models = {
'admin.company': {
'Meta': {'object_name': 'Company'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
'admin.institution': {
'Meta': {'object_name': 'Institution'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'international': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'})
},
u'admin.logentry': {
'Meta': {'ordering': "(u'-action_time',)", 'object_name': 'LogEntry', 'db_table': "u'django_admin_log'"},
'action_flag': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'action_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'change_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_repr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'admin.occupationalstandard': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'OccupationalStandard'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'knowledge': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'performace_criteria': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'scope': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"}),
'skills': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
},
'admin.qualificationpack': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'QualificationPack'},
'alias': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'experience': ('django.db.models.fields.TextField', [], {'default': 'None'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'job_role': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'max_educational_qualification': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'min_educational_qualification': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'nveqf_level': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '5'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'os_compulsory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'os_compulsory'", 'symmetrical': 'False', 'to': "orm['admin.OccupationalStandard']"}),
'os_optional': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'os_optional'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['admin.OccupationalStandard']"}),
'role_description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'training': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
},
'admin.sector': {
'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'})
},
'admin.subsector': {
'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['admin']
|
{
"content_hash": "2a46749136a23b3ca05c043c881f5565",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 218,
"avg_line_length": 77.58783783783784,
"alnum_prop": 0.5562135330488548,
"repo_name": "arpitprogressive/arpittest",
"id": "ca9018caa10bd2e31b57793eb67b3e7bef77971c",
"size": "11507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/admin/migrations/0002_auto__add_field_occupationalstandard_attachment__add_field_qualificati.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133532"
},
{
"name": "JavaScript",
"bytes": "227983"
},
{
"name": "Python",
"bytes": "782274"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
}
|
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.directconnect import exceptions
from boto.compat import json
class DirectConnectConnection(AWSQueryConnection):
"""
AWS Direct Connect makes it easy to establish a dedicated network
connection from your premises to Amazon Web Services (AWS). Using
AWS Direct Connect, you can establish private connectivity between
AWS and your data center, office, or colocation environment, which
in many cases can reduce your network costs, increase bandwidth
throughput, and provide a more consistent network experience than
Internet-based connections.
The AWS Direct Connect API Reference provides descriptions,
syntax, and usage examples for each of the actions and data types
for AWS Direct Connect. Use the following links to get started
using the AWS Direct Connect API Reference :
+ `Actions`_: An alphabetical list of all AWS Direct Connect
actions.
+ `Data Types`_: An alphabetical list of all AWS Direct Connect
data types.
+ `Common Query Parameters`_: Parameters that all Query actions
can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
"""
APIVersion = "2012-10-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
ServiceName = "DirectConnect"
TargetPrefix = "OvertureService"
ResponseError = JSONResponseError
_faults = {
"DirectConnectClientException": exceptions.DirectConnectClientException,
"DirectConnectServerException": exceptions.DirectConnectServerException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DirectConnectConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def allocate_connection_on_interconnect(self, bandwidth, connection_name,
owner_account, interconnect_id,
vlan):
"""
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth
for use by a hosted connection on the given interconnect.
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: " 500Mbps "
Default: None
:type connection_name: string
:param connection_name: Name of the provisioned connection.
Example: " 500M Connection to AWS "
Default: None
:type owner_account: string
:param owner_account: Numeric account Id of the customer for whom the
connection will be provisioned.
Example: 123443215678
Default: None
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which the connection
will be provisioned.
Example: dxcon-456abc78
Default: None
:type vlan: integer
:param vlan: The dedicated VLAN provisioned to the connection.
Example: 101
Default: None
"""
params = {
'bandwidth': bandwidth,
'connectionName': connection_name,
'ownerAccount': owner_account,
'interconnectId': interconnect_id,
'vlan': vlan,
}
return self.make_request(action='AllocateConnectionOnInterconnect',
body=json.dumps(params))
def allocate_private_virtual_interface(self, connection_id,
owner_account,
new_private_virtual_interface_allocation):
"""
Provisions a private virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
private virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPrivateVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the private virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new private
virtual interface.
Default: None
:type new_private_virtual_interface_allocation: dict
:param new_private_virtual_interface_allocation: Detailed information
for the private virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation,
}
return self.make_request(action='AllocatePrivateVirtualInterface',
body=json.dumps(params))
def allocate_public_virtual_interface(self, connection_id, owner_account,
new_public_virtual_interface_allocation):
"""
Provisions a public virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
public virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPublicVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the public virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new public
virtual interface.
Default: None
:type new_public_virtual_interface_allocation: dict
:param new_public_virtual_interface_allocation: Detailed information
for the public virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation,
}
return self.make_request(action='AllocatePublicVirtualInterface',
body=json.dumps(params))
def confirm_connection(self, connection_id):
"""
Confirm the creation of a hosted connection on an
interconnect.
Upon creation, the hosted connection is initially in the
'Ordering' state, and will remain in this state until the
owner calls ConfirmConnection to confirm creation of the
hosted connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='ConfirmConnection',
body=json.dumps(params))
def confirm_private_virtual_interface(self, virtual_interface_id,
virtual_gateway_id):
"""
Accept ownership of a private virtual interface created by
another customer.
After the virtual interface owner calls this function, the
virtual interface will be created and attached to the given
virtual private gateway, and will be available for handling
traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
:type virtual_gateway_id: string
:param virtual_gateway_id: ID of the virtual private gateway that will
be attached to the virtual interface.
A virtual private gateway can be managed via the Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
Default: None
"""
params = {
'virtualInterfaceId': virtual_interface_id,
'virtualGatewayId': virtual_gateway_id,
}
return self.make_request(action='ConfirmPrivateVirtualInterface',
body=json.dumps(params))
def confirm_public_virtual_interface(self, virtual_interface_id):
"""
Accept ownership of a public virtual interface created by
another customer.
After the virtual interface owner calls this function, the
specified virtual interface will be created and made available
for handling traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='ConfirmPublicVirtualInterface',
body=json.dumps(params))
def create_connection(self, location, bandwidth, connection_name):
"""
Creates a new connection between the customer network and a
specific AWS Direct Connect location.
A connection links your internal network to an AWS Direct
Connect location over a standard 1 gigabit or 10 gigabit
Ethernet fiber-optic cable. One end of the cable is connected
to your router, the other to an AWS Direct Connect router. An
AWS Direct Connect location provides access to Amazon Web
Services in the region it is associated with. You can
establish connections with AWS Direct Connect locations in
multiple regions, but a connection in one region does not
provide connectivity to other regions.
:type location: string
:param location: Where the connection is located.
Example: EqSV5
Default: None
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: 1Gbps
Default: None
:type connection_name: string
:param connection_name: The name of the connection.
Example: " My Connection to AWS "
Default: None
"""
params = {
'location': location,
'bandwidth': bandwidth,
'connectionName': connection_name,
}
return self.make_request(action='CreateConnection',
body=json.dumps(params))
def create_interconnect(self, interconnect_name, bandwidth, location):
"""
Creates a new interconnect between a AWS Direct Connect
partner's network and a specific AWS Direct Connect location.
An interconnect is a connection which is capable of hosting
other connections. The AWS Direct Connect partner can use an
interconnect to provide sub-1Gbps AWS Direct Connect service
to tier 2 customers who do not have their own connections.
Like a standard connection, an interconnect links the AWS
Direct Connect partner's network to an AWS Direct Connect
location over a standard 1 Gbps or 10 Gbps Ethernet fiber-
optic cable. One end is connected to the partner's router, the
other to an AWS Direct Connect router.
For each end customer, the AWS Direct Connect partner
provisions a connection on their interconnect by calling
AllocateConnectionOnInterconnect. The end customer can then
connect to AWS resources by creating a virtual interface on
their connection, using the VLAN assigned to them by the AWS
Direct Connect partner.
:type interconnect_name: string
:param interconnect_name: The name of the interconnect.
Example: " 1G Interconnect to AWS "
Default: None
:type bandwidth: string
:param bandwidth: The port bandwidth
Example: 1Gbps
Default: None
Available values: 1Gbps,10Gbps
:type location: string
:param location: Where the interconnect is located
Example: EqSV5
Default: None
"""
params = {
'interconnectName': interconnect_name,
'bandwidth': bandwidth,
'location': location,
}
return self.make_request(action='CreateInterconnect',
body=json.dumps(params))
def create_private_virtual_interface(self, connection_id,
new_private_virtual_interface):
"""
Creates a new private virtual interface. A virtual interface
is the VLAN that transports AWS Direct Connect traffic. A
private virtual interface supports sending traffic to a single
virtual private cloud (VPC).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_private_virtual_interface: dict
:param new_private_virtual_interface: Detailed information for the
private virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPrivateVirtualInterface': new_private_virtual_interface,
}
return self.make_request(action='CreatePrivateVirtualInterface',
body=json.dumps(params))
def create_public_virtual_interface(self, connection_id,
new_public_virtual_interface):
"""
Creates a new public virtual interface. A virtual interface is
the VLAN that transports AWS Direct Connect traffic. A public
virtual interface supports sending traffic to public services
of AWS such as Amazon Simple Storage Service (Amazon S3).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_public_virtual_interface: dict
:param new_public_virtual_interface: Detailed information for the
public virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPublicVirtualInterface': new_public_virtual_interface,
}
return self.make_request(action='CreatePublicVirtualInterface',
body=json.dumps(params))
def delete_connection(self, connection_id):
"""
Deletes the connection.
Deleting a connection only stops the AWS Direct Connect port
hour and data transfer charges. You need to cancel separately
with the providers any services or charges for cross-connects
or network circuits that connect you to the AWS Direct Connect
location.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='DeleteConnection',
body=json.dumps(params))
def delete_interconnect(self, interconnect_id):
"""
Deletes the specified interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DeleteInterconnect',
body=json.dumps(params))
def delete_virtual_interface(self, virtual_interface_id):
"""
Deletes a virtual interface.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='DeleteVirtualInterface',
body=json.dumps(params))
def describe_connections(self, connection_id=None):
"""
Displays all connections in this region.
If a connection ID is provided, the call returns only that
particular connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
return self.make_request(action='DescribeConnections',
body=json.dumps(params))
def describe_connections_on_interconnect(self, interconnect_id):
"""
Return a list of connections that have been provisioned on the
given interconnect.
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which a list of
connection is provisioned.
Example: dxcon-abc123
Default: None
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DescribeConnectionsOnInterconnect',
body=json.dumps(params))
def describe_interconnects(self, interconnect_id=None):
"""
Returns a list of interconnects owned by the AWS account.
If an interconnect ID is provided, it will only return this
particular interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {}
if interconnect_id is not None:
params['interconnectId'] = interconnect_id
return self.make_request(action='DescribeInterconnects',
body=json.dumps(params))
def describe_locations(self):
"""
Returns the list of AWS Direct Connect locations in the
current AWS region. These are the locations that may be
selected when calling CreateConnection or CreateInterconnect.
"""
params = {}
return self.make_request(action='DescribeLocations',
body=json.dumps(params))
def describe_virtual_gateways(self):
"""
Returns a list of virtual private gateways owned by the AWS
account.
You can create one or more AWS Direct Connect private virtual
interfaces linking to a virtual private gateway. A virtual
private gateway can be managed via Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
"""
params = {}
return self.make_request(action='DescribeVirtualGateways',
body=json.dumps(params))
def describe_virtual_interfaces(self, connection_id=None,
virtual_interface_id=None):
"""
Displays all virtual interfaces for an AWS account. Virtual
interfaces deleted fewer than 15 minutes before
DescribeVirtualInterfaces is called are also returned. If a
connection ID is included then only virtual interfaces
associated with this connection will be returned. If a virtual
interface ID is included then only a single virtual interface
will be returned.
A virtual interface (VLAN) transmits the traffic between the
AWS Direct Connect location and the customer.
If a connection ID is provided, only virtual interfaces
provisioned on the specified connection will be returned. If a
virtual interface ID is provided, only this particular virtual
interface will be returned.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
if virtual_interface_id is not None:
params['virtualInterfaceId'] = virtual_interface_id
return self.make_request(action='DescribeVirtualInterfaces',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
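# A minimal usage sketch (placeholder credentials; this issues live AWS calls,
# and the response keys follow the AWS Direct Connect JSON API):
#     conn = DirectConnectConnection(aws_access_key_id='...',
#                                    aws_secret_access_key='...')
#     for location in conn.describe_locations().get('locations', []):
#         print(location)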
|
{
"content_hash": "8330d6d77a02ece9bed6b59eaf823f18",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 93,
"avg_line_length": 37.018121911037895,
"alnum_prop": 0.6279483756119271,
"repo_name": "appneta/boto",
"id": "08197ddfa873003a8275abead648037cbabbbf3d",
"size": "23594",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "boto/directconnect/layer1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "5997207"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
"""
COHORTE Java isolate loader, based on jPype
**TODO:**
* Review constant names & values
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Modifications:
MOD_BD_20150916 Inherits PROP_NODE_DATA_DIR from pelix.
"""
# Python standard library
import logging
import os
import sys
import threading
import time
# COHORTE constants
import cohorte
import cohorte.repositories
import cohorte.version
# Herald
import herald
# JPype (Java bridge)
import jpype
# Pelix framework
import pelix.framework
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Validate, \
    Invalidate, Property, Requires
import pelix.shell
# ------------------------------------------------------------------------------
# Bundle version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
ISOLATE_LOADER_FACTORY = 'cohorte-loader-java-factory'
""" Forker loader factory name """
LOADER_KIND = 'osgi'
""" Kind of isolate started with this loader """
BUNDLE_SERVICES_FOLDER = 'META-INF/services'
""" Path of the descriptions of the bundle services (in a JAR) """
FRAMEWORK_SERVICE = 'org.osgi.framework.launch.FrameworkFactory'
""" FrameworkFactory service descriptor in the framework JAR file """
FRAMEWORK_SYSTEMPACKAGES_EXTRA = "org.osgi.framework.system.packages.extra"
""" OSGi extra system packages """
PYTHON_BRIDGE_BUNDLE_API = "org.cohorte.pyboot.api"
""" Name of the Python bridge API bundle """
PYTHON_BRIDGE_BUNDLE = "org.cohorte.pyboot"
""" Name of the Python bridge bundle """
PYTHON_JAVA_BRIDGE_INTERFACE = "org.cohorte.pyboot.api.IPyBridge"
""" Interface of the Python - Java bridge """
HERALD_BUNDLE_API = "org.cohorte.herald.api"
""" Name of the bundle and package which contain the Herald Event API """
HERALD_EVENT_INTERFACE = "org.cohorte.herald.eventapi.IEvent"
""" Interface of an Herald Event """
HERALD_EVENT_FACTORY_INTERFACE = "org.cohorte.herald.eventapi.IEventFactory"
""" Interface of the Herald EventFactory service """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class PyBridge(object):
"""
Python - Java bridge service implementation
"""
# pylint: disable=invalid-name
# Implemented Java interface
JAVA_INTERFACE = PYTHON_JAVA_BRIDGE_INTERFACE
def __init__(self, context, jvm, java_configuration, configuration_parser,
callback):
"""
Sets up the bridge
:param context: The bundle context
:param jvm: The JVM wrapper
:param java_configuration: Java boot configuration
:param callback: Method to call back on error or success
"""
# Bundle context
self._context = context
# Java class
self.ArrayList = jvm.load_class("java.util.ArrayList")
self.Component = jvm.load_class("org.cohorte.pyboot.api.ComponentBean")
self.HashMap = jvm.load_class("java.util.HashMap")
# Prepare members
self._callback = callback
self._components = {}
self._parser = configuration_parser
# Convert stored components
self._java_boot_config = self._to_java(java_configuration)
self._prepare_components(java_configuration.composition)
def _prepare_components(self, raw_components):
"""
Converts the Python Component objects into Java Component beans
:param raw_components: Python components representations
"""
for component in raw_components:
# Convert properties
properties = self.HashMap()
for key, value in component.properties.items():
properties.put(key, value)
# Store the component bean
self._components[component.name] = \
self.Component(component.factory, component.name, properties)
def _to_java(self, data):
"""
Recursively converts lists and maps to Java ones
:param data: Data to be converted
:return: Converted data
"""
try:
# Named tuple (in theory)
as_dict = getattr(data, '_asdict')
except AttributeError:
# Keep data as is
pass
else:
data = as_dict()
if isinstance(data, dict):
# Convert a dictionary
converted = self.HashMap()
for key, value in data.items():
# Convert entry
converted.put(self._to_java(key), self._to_java(value))
return converted
elif isinstance(data, (list, tuple, set)):
# Convert a list
converted = self.ArrayList()
for item in data:
# Convert each item
converted.add(self._to_java(item))
return converted
else:
# No conversion
return data
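    # Illustration: _to_java({'a': [1, 2], 'b': {'c': 3}}) yields a java.util.HashMap
    # whose 'a' entry is a java.util.ArrayList and whose 'b' entry is a nested
    # HashMap; scalars and unrecognised types pass through unchanged.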
@staticmethod
def debug(message, values):
"""
Logs a debug message
"""
_logger.debug(message.format(*values))
@staticmethod
def error(message, values):
"""
Logs an error message
"""
_logger.error(message.format(*values))
def getComponents(self):
"""
Retrieves the components to instantiate (Java API)
:return: An array of components
"""
# Create a list
result = self.ArrayList()
for component in self._components.values():
result.add(component)
return result
def getStartConfiguration(self):
"""
Retrieves the configuration used to start this isolate as a map
:return: The configuration used to start this isolate
"""
return self._java_boot_config
@staticmethod
def getPid():
"""
Retrieves the Process ID of this isolate
:return: The isolate PID
"""
return os.getpid()
def getRemoteShellPort(self):
"""
Returns the port used by the Pelix remote shell, or -1 if the shell is
not active
:return: The port used by the remote shell, or -1
"""
ref = self._context.get_service_reference(
pelix.shell.REMOTE_SHELL_SPEC)
if ref is None:
return -1
try:
# Get the service
shell = self._context.get_service(ref)
# Get the shell port
port = shell.get_access()[1]
# Release the service
self._context.unget_service(ref)
return port
except pelix.framework.BundleException:
# Service lost (called while the framework was stopping)
return -1
def onComponentStarted(self, name):
"""
Called when a component has been started
:param name: Name of the started component
"""
if name in self._components:
del self._components[name]
if not self._components:
self._callback(True, "All components have been instantiated")
def onError(self, error):
"""
Called when an error has occurred
:param error: An error message
"""
self._callback(False, error)
def prepareIsolate(self, uid, name, node, kind, level, sublevel,
bundles, composition):
"""
Prepares the configuration dictionary of an isolate
"""
try:
conf = self._parser.prepare_isolate(
uid, name, node, kind, level, sublevel, bundles, composition)
        except Exception:
_logger.exception("Error preparing isolate...")
return None
return self._to_java(conf)
def readConfiguration(self, filename):
"""
Reads the given configuration file
:param filename: A configuration file name
:return: The parsed configuration map
"""
# Load the file
raw_dict = self._parser.read(filename)
# Convert the dictionary to Java
return self._to_java(raw_dict)
# ------------------------------------------------------------------------------
class EventFactory(object):
"""
Implementation of org.cohorte.herald.eventapi.IEventFactory
"""
JAVA_INTERFACE = HERALD_EVENT_FACTORY_INTERFACE
def __init__(self, java_svc):
"""
Sets up members
"""
self._java = java_svc
def createEvent(self):
"""
Creates an event for the Java world
"""
return self._java.make_proxy(EventProxy())
def sleep(self, milliseconds):
"""
Sleeps the given number of milliseconds
"""
time.sleep(milliseconds / 1000.)
def toString(self):
"""
Java toString() method
"""
return "Python Event Factory for Herald"
class EventProxy(object):
"""
Implementation of org.cohorte.herald.eventapi.IEvent
"""
JAVA_INTERFACE = HERALD_EVENT_INTERFACE
def __init__(self):
"""
Sets up members
"""
self.__event = threading.Event()
# Common names
for method in ('clear', 'isSet', 'set'):
setattr(self, method, getattr(self.__event, method))
def waitEvent(self, timeout_ms=None):
"""
Proxy to call the wait() method of the event
"""
if timeout_ms is None or timeout_ms < 0:
return self.__event.wait()
else:
return self.__event.wait(timeout_ms / 1000.)
def toString(self):
"""
Java toString() method
"""
return "Python EventProxy for Herald"
# ------------------------------------------------------------------------------
@ComponentFactory(ISOLATE_LOADER_FACTORY)
@Provides(cohorte.SERVICE_ISOLATE_LOADER)
@Property('_handled_kind', cohorte.SVCPROP_ISOLATE_LOADER_KIND, LOADER_KIND)
@Requires('_java', cohorte.SERVICE_JAVA_RUNNER)
@Requires('_repository', cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS,
spec_filter="({0}=java)"
.format(cohorte.repositories.PROP_REPOSITORY_LANGUAGE))
@Requires('_config', cohorte.SERVICE_CONFIGURATION_READER)
@Requires('_finder', cohorte.SERVICE_FILE_FINDER)
class JavaOsgiLoader(object):
"""
Pelix isolate loader. Needs a configuration to be given as a parameter of
the load() method.
"""
def __init__(self):
"""
Sets up members
"""
# Injected services
self._java = None
self._config = None
self._finder = None
self._repository = None
# Pelix bundle context
self._context = None
# OSGi Framework
self._osgi = None
# Bridge service registration
self._bridge_reg = None
@staticmethod
def _setup_vm_properties(properties):
"""
Sets up the JVM system properties dictionary (not the arguments)
:param properties: Configured properties
:return: VM properties dictionary
"""
# Prepare the dictionary
return properties.copy() if properties else {}
def _setup_osgi_properties(self, properties, allow_bridge, extra_packages=None):
"""
Sets up the OSGi framework properties and converts them into a Java
HashMap.
:param properties: Configured framework properties
:param allow_bridge: If True, the bridge API package will be exported
by the framework.
:return: The framework properties as a Java Map
"""
osgi_properties = self._java.load_class("java.util.HashMap")()
for key, value in properties.items():
if value is not None:
osgi_properties.put(key, str(value))
# Inherit some Pelix properties
for key in (cohorte.PROP_HOME, cohorte.PROP_BASE,
cohorte.PROP_UID, cohorte.PROP_NAME,
cohorte.PROP_NODE_UID, cohorte.PROP_NODE_NAME,
cohorte.PROP_NODE_DATA_DIR,
cohorte.PROP_DUMPER_PORT,
cohorte.PROP_FORKER_HTTP_PORT,
herald.FWPROP_PEER_UID, herald.FWPROP_PEER_NAME,
herald.FWPROP_NODE_UID, herald.FWPROP_NODE_NAME,
herald.FWPROP_APPLICATION_ID):
value = self._context.get_property(key)
if value is not None:
# Avoid empty values
osgi_properties.put(key, str(value))
# Special case: Herald groups (comma-separated list)
value = self._context.get_property(herald.FWPROP_PEER_GROUPS)
if value:
osgi_properties.put(herald.FWPROP_PEER_GROUPS,
','.join(str(group) for group in value))
new_extra_packages = None
if allow_bridge:
# Prepare the "extra system package" framework property
if extra_packages:
new_extra_packages = "{0}; version=1.0.0, {1}; version=1.0.0,{2}".format(
PYTHON_BRIDGE_BUNDLE_API, HERALD_BUNDLE_API, extra_packages)
else:
new_extra_packages = "{0}; version=1.0.0, {1}; version=1.0.0".format(
PYTHON_BRIDGE_BUNDLE_API, HERALD_BUNDLE_API)
else:
if extra_packages:
new_extra_packages = "{0}".format(extra_packages)
if new_extra_packages:
_logger.debug(
"Framework extra-packages={0}".format(new_extra_packages))
osgi_properties.put(
FRAMEWORK_SYSTEMPACKAGES_EXTRA, new_extra_packages)
else:
_logger.debug("No extra-packages!")
return osgi_properties
def _start_jvm(self, vm_args, classpath, properties):
"""
Starts the JVM, with the given file in the class path
:param vm_args: JVM arguments
:param classpath: A list of JAR files
:param properties: Java system properties
:raise KeyError: Error starting the JVM
:raise ValueError: Invalid JAR file
"""
# Start a JVM if necessary
if not self._java.is_running():
# Arguments given to the Java runner
java_args = []
if vm_args:
# VM specific arguments first
java_args.extend(vm_args)
# DEBUG: Remote debug server
# java_args.append("-Xdebug")
# java_args.append("-Xrunjdwp:transport=dt_socket,"
# "server=y,suspend=y,address=5005")
# Set the class path as a parameter
java_args.append(self._java.make_jvm_classpath(classpath))
# Prepare the JVM properties definitions
for key, value in self._setup_vm_properties(properties).items():
java_args.append(self._java.make_jvm_property(key, value))
            _logger.info("Java arguments: %s", java_args)
self._java.start(None, *java_args)
else:
# Add the JAR to the class path
for jar_file in classpath:
self._java.add_jar(jar_file)
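    # Illustrative sketch (assumption): for a fresh JVM the argument list
    # assembled above is roughly
    #   java_args = list(vm_args or []) \
    #       + [self._java.make_jvm_classpath(classpath)] \
    #       + [self._java.make_jvm_property(k, v) for k, v in properties.items()]
    # i.e. a class path option followed by "-Dkey=value"-style definitions,
    # with the exact rendering delegated to the Java runner service.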
def _close_osgi(self):
"""
Stops the OSGi framework and clears all references to it
"""
# Unregister services
if self._bridge_reg is not None:
self._bridge_reg.unregister()
self._bridge_reg = None
# Stop the framework
if self._osgi is not None:
self._osgi.stop()
self._osgi = None
def _register_bridge(self, context, java_configuration):
"""
Instantiates and registers the iPOJO components instantiation handler
inside the OSGi framework
:param context: An OSGi bundle context
:param java_configuration: The Java boot configuration
"""
# Make a Java proxy of the bridge
bridge_java = self._java.make_proxy(
PyBridge(self._context, self._java, java_configuration,
self._config, self._bridge_callback))
# Register it to the framework
self._bridge_reg = context.registerService(
PyBridge.JAVA_INTERFACE, bridge_java, None)
def _register_herald_bridge(self, context):
"""
Registers the Herald EventFactory service inside the OSGi framework
:param context: An OSGi bundle context
"""
# Make a Java proxy of the Herald bridge
herald_java = self._java.make_proxy(EventFactory(self._java))
# Register it to the framework
props = self._java.load_class("java.util.Hashtable")()
props.put("service.ranking", 1000)
self._bridge_reg = context.registerService(
EventFactory.JAVA_INTERFACE, herald_java, props)
@staticmethod
def _bridge_callback(success, message):
"""
Called back by the Python-Java bridge
:param success: If True, all components have been started, else an
error occurred
:param message: A call back message
"""
if success:
_logger.debug("Bridge success: %s", message)
else:
_logger.warning("Bridge error: %s", message)
def _find_osgi_jar(self, osgi_jar, symbolic_name):
"""
Looks for the OSGi framework JAR file matching the given parameters
:param osgi_jar: An OSGi framework JAR file name
:param symbolic_name: An OSGi framework symbolic name
:return: A (file name, framework factory) tuple
:raise ValueError: No OSGi framework found
"""
try:
# We've been given a specific JAR file or symbolic name
osgi_bundle = self._repository.get_artifact(symbolic_name,
filename=osgi_jar)
except ValueError:
# Bundle not found
for bundle in self._repository.filter_services(FRAMEWORK_SERVICE):
# Get the first found framework
osgi_bundle = bundle
break
else:
# No match found
raise ValueError("No OSGi framework found in repository")
        # Found!
return osgi_bundle.file, osgi_bundle.get_service(FRAMEWORK_SERVICE)
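    # Illustrative sketch (hypothetical values): the tuple returned above
    # pairs the framework JAR with its FrameworkFactory class name, e.g.
    #   ("org.apache.felix.framework-5.6.10.jar",
    #    "org.apache.felix.framework.FrameworkFactory")
    # The actual values depend on the artifacts found in the repository.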
def load(self, configuration):
"""
Loads the Java OSGi isolate
:param configuration: Isolate configuration dictionary (required)
:raise KeyError: A mandatory property is missing
:raise ValueError: Invalid parameter/file encountered or the JVM
can't be loaded
:raise BundleException: Error installing a bundle
:raise Exception: Error instantiating a component
"""
if not configuration:
raise KeyError("A configuration is required to load a "
"Java OSGi isolate")
        _logger.debug("Configuration: %s", configuration)
# Parse the configuration (boot-like part) -> Might raise error
java_config = self._config.load_boot_dict(configuration)
# Find the OSGi JAR file to use
osgi_jar_file, factory_name = self._find_osgi_jar(
configuration.get('osgi_jar'), configuration.get('osgi_name'))
_logger.debug("Using OSGi JAR file: %s", osgi_jar_file)
# Prepare the VM arguments
classpath = [osgi_jar_file]
# Find the bridge API JAR file
api_jar = self._repository.get_artifact(PYTHON_BRIDGE_BUNDLE_API)
if api_jar:
# Add the bundle to the class path...
classpath.append(api_jar.file)
else:
raise Exception("Python bridge API bundle is missing")
# Find the Herald API JAR file
herald_event_jar = self._repository.get_artifact(
HERALD_BUNDLE_API)
if herald_event_jar:
# Add the bundle to the class path...
classpath.append(herald_event_jar.file)
else:
raise Exception("Herald Event API bundle is missing")
# Start the JVM
_logger.debug("Starting JVM...")
self._start_jvm(configuration.get('vm_args'), classpath,
configuration.get('vm_properties'))
# Patch for Mac OS X:
# GUI library must be loaded early in the main thread
if sys.platform == 'darwin':
# We need this dark magic stuff for dummy OSes
self._java.load_class("java.awt.Color")
# Load the FrameworkFactory implementation
_logger.debug("Loading OSGi FrameworkFactory: %s", factory_name)
factory_class = self._java.load_class(factory_name)
factory = factory_class()
# Retrieve extra packages
vm_args = configuration.get('vm_args')
tmp = []
        if vm_args:
            tmp = [vm_arg for vm_arg in vm_args
                   if FRAMEWORK_SYSTEMPACKAGES_EXTRA in vm_arg]
        extra_packages = ""
        if tmp:
            # Keep everything after the first '=': the property value can
            # itself contain '=' (e.g. in "version=1.0.0" attributes)
            extra_packages = tmp[0].split("=", 1)[1]
# Framework properties
osgi_properties = self._setup_osgi_properties(java_config.properties,
api_jar is not None,
extra_packages)
# Start a framework, with the given properties
self._osgi = factory.newFramework(osgi_properties)
self._osgi.start()
context = self._osgi.getBundleContext()
# Register the Herald Event API bridge
self._register_herald_bridge(context)
# Install bundles
java_bundles = []
# Install the bridge
bundle = self._repository.get_artifact(PYTHON_BRIDGE_BUNDLE)
if not bundle:
_logger.warning("No Python bridge bundle found")
else:
_logger.debug("Installing PyBridge bundle: %s", bundle.url)
java_bundles.append(context.installBundle(bundle.url))
# Install the configured bundles
for bundle_conf in java_config.bundles:
bundle = self._repository.get_artifact(
bundle_conf.name, bundle_conf.version, bundle_conf.filename)
if not bundle:
if not bundle_conf.optional:
raise ValueError("Bundle not found: {0}"
.format(bundle_conf))
else:
_logger.warning("Bundle not found: %s", bundle_conf)
elif bundle.file == osgi_jar_file:
_logger.debug("OSGi framework is already installed.")
else:
_logger.debug("Installing Java bundle %s (is_fragment=%s)...", bundle.name, bundle.is_fragment())
b = context.installBundle(bundle.url)
if not bundle.is_fragment():
java_bundles.append(b)
try:
# Start the bundles
for bundle in java_bundles:
_logger.debug("Starting %s...", bundle.getSymbolicName())
bundle.start()
except jpype.JavaException as ex:
# Log the bundle exception and its cause
_logger.error("Error starting bundle: %s",
ex.__javaobject__.toString())
cause = ex.__javaobject__.getCause()
while cause is not None:
_logger.error("... caused by: %s", cause.toString())
cause = cause.getCause()
# Raise exception to the caller
raise
# Start the component instantiation handler
# (once all bundles have been installed)
self._register_bridge(context, java_config)
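    # Illustrative usage sketch (not from the original source): a caller
    # holding this isolate loader service would typically run
    #   loader.load(isolate_configuration)  # starts the JVM and the OSGi framework
    #   loader.wait()                       # blocks until the framework stops
    # where isolate_configuration is the dictionary documented in load().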
def wait(self):
"""
Waits for the isolate to stop
"""
if not self._osgi:
# Nothing to do
return
# Wait for the OSGi framework to stop
try:
self._osgi.waitForStop(0)
except Exception as ex:
_logger.exception("Error waiting for the OSGi framework "
"to stop: %s", ex)
raise
@Validate
def validate(self, context):
"""
Component validated
:param context: The bundle context
"""
# Update the finder
self._finder.update_roots()
# Store the framework access
self._context = context
@Invalidate
def invalidate(self, context):
"""
Component invalidated
:param context: The bundle context
"""
# Stop the framework
self._close_osgi()
# Clear the JVM
self._java.stop()
# Clear the framework access
self._context = None
|
{
"content_hash": "da469b5acef8bf1a448458bb7fe4aad3",
"timestamp": "",
"source": "github",
"line_count": 778,
"max_line_length": 113,
"avg_line_length": 32.68637532133676,
"alnum_prop": 0.5767204089657885,
"repo_name": "isandlaTech/cohorte-runtime",
"id": "1620a2d5f69105149e0c24cc257b55e40f733b95",
"size": "25484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/cohorte/boot/loaders/osgi_inner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "15319"
},
{
"name": "HTML",
"bytes": "12092"
},
{
"name": "Java",
"bytes": "985958"
},
{
"name": "JavaScript",
"bytes": "107910"
},
{
"name": "Python",
"bytes": "3715788"
},
{
"name": "Shell",
"bytes": "2095"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gc
import os
import pickle
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
@test_util.with_control_flow_v2
class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEmpty(gc.garbage)
super(ResourceVariableOpsTest, self).tearDown()
@test_util.run_deprecated_v1
def testHandleDtypeShapeMatch(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[0],
dtype=dtypes.int32)).run()
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
0,
dtype=dtypes.int32)).run()
@test_util.run_gpu_only
def testGPUInt64(self):
with context.eager_mode(), context.device("gpu:0"):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
self.assertAllEqual(1, v.numpy())
def testEagerNameNotIdentity(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0, name="a")
v1 = resource_variable_ops.ResourceVariable(2.0, name="a")
self.assertAllEqual(v0.numpy(), 1.0)
self.assertAllEqual(v1.numpy(), 2.0)
def testEagerNameNotNeeded(self):
with context.eager_mode():
v0 = resource_variable_ops.ResourceVariable(1.0)
self.assertAllEqual(v0.numpy(), 1.0)
def testReadVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(handle, 1)
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Trying to read variable with wrong dtype. "
"Expected float got int32"):
_ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
def testEagerInitializedValue(self):
with context.eager_mode():
variable = resource_variable_ops.ResourceVariable(1.0, name="eager-init")
self.assertAllEqual(variable.numpy(), 1.0)
self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
def testInitializeVariableUsingInitializedValue(self):
var1 = resource_variable_ops.ResourceVariable(1.0, name="var1")
var2 = resource_variable_ops.ResourceVariable(var1.initialized_value(),
name="var2")
self.assertAllEqual(var2.initialized_value(), 1.0)
def testEagerBool(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(False, name="bool_test")
self.assertAllEqual(bool(v), False)
def testEagerDeepCopy(self):
with context.eager_mode():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
copied_variable = copy.deepcopy(variable)
copied_variable.assign(4 * np.ones((4, 4, 4)))
# Copying the variable should create a new underlying tensor with distinct
# values.
self.assertFalse(np.allclose(variable.numpy(), copied_variable.numpy()))
@test_util.run_deprecated_v1
def testGraphDeepCopy(self):
with self.cached_session():
init_value = np.ones((4, 4, 4))
variable = resource_variable_ops.ResourceVariable(init_value,
name="init")
with self.assertRaises(NotImplementedError):
copy.deepcopy(variable)
@test_util.run_in_graph_and_eager_modes
def testStridedSliceAssign(self):
v = resource_variable_ops.ResourceVariable([1.0, 2.0])
self.evaluate(variables.global_variables_initializer())
self.evaluate(v[0].assign(2.0))
self.assertAllEqual(self.evaluate(v), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testVariableShape(self):
v = resource_variable_ops.ResourceVariable([1., 1.])
self.assertAllEqual(
tensor_util.constant_value(
resource_variable_ops.variable_shape(v.handle)),
[2])
@test_util.run_deprecated_v1
def testDifferentAssignGraph(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
ops.reset_default_graph()
    v.assign(2.0)  # Note: this fails if convert_to_tensor is run on a graph
                   # other than the variable's graph.
@test_util.run_deprecated_v1
def testFetchHandle(self):
with self.cached_session():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertNotEmpty(handle.eval())
@test_util.run_deprecated_v1
def testCachedValueReadBeforeWrite(self):
with self.cached_session() as sess:
v = resource_variable_ops.ResourceVariable(0.0, caching_device="cpu:0")
self.evaluate(v.initializer)
value, _ = sess.run([v, v.assign_add(1.0)])
self.assertAllEqual(value, 0.0)
def testAssignVariableDtypeMismatchEager(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1]))
with self.assertRaisesRegexp(
errors.InvalidArgumentError, "Trying to assign variable with wrong "
"dtype. Expected int32 got float"):
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([1.], dtype=dtypes.float32))
def testUnprintableHandle(self):
with context.eager_mode():
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1], name="foo")
self.assertIn("<unprintable>", str(handle))
self.assertIn("<unprintable>", repr(handle))
@test_util.run_in_graph_and_eager_modes
def testDtypeSurvivesIdentity(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
self.evaluate(resource_variable_ops.assign_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)))
def testUnreadOpName(self):
v = resource_variable_ops.ResourceVariable(1.0)
self.assertNotEqual(v.name, v.assign_add(1.0).name)
@test_util.run_in_graph_and_eager_modes
def testCreateRead(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
value = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertAllEqual(1, value)
@test_util.run_in_graph_and_eager_modes
def testManyAssigns(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = self.evaluate([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
@test_util.run_in_graph_and_eager_modes
def testAssignAdd(self):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
self.evaluate(resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)))
read = self.evaluate(
resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
self.assertEqual(read, 2)
@test_util.run_in_graph_and_eager_modes
def testScatterAdd(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNd(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1, 1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads), [[0., 0.], [0., 1.]])
@test_util.run_deprecated_v1
def testDefaultGradientDtype(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float64)
c = constant_op.constant(1.)
identity = array_ops.identity_n([c, v.handle])
# TODO(b/137403775): Remove this.
custom_gradient.copy_handle_data(v.handle, identity[1])
g = gradients_impl.gradients(identity[0], [c, v.handle])
self.assertEqual(g[1].dtype, dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g[1], [[0., 0.], [0., 0.]])
@test_util.run_deprecated_v1
def testUnconnectedGradientZeros(self):
b = resource_variable_ops.ResourceVariable(initial_value=[[3., 4.]])
c = constant_op.constant(0.)
g = gradients_impl.gradients(c, [b], unconnected_gradients="zero")[0]
self.assertAllEqual(g.shape.as_list(), [1, 2])
@test_util.run_in_graph_and_eager_modes
def testGradientGatherNdIndexedSlices(self):
v = resource_variable_ops.ResourceVariable(
np.random.uniform(size=[2, 2]), dtype=dtypes.float32)
with backprop.GradientTape() as tape:
l = array_ops.gather_nd(v, [[1], [1]])
l = math_ops.reduce_sum(l)
grads = tape.gradient(l, v)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(grads.values), [[1., 1.], [1., 1.]])
@test_util.run_in_graph_and_eager_modes
def testScatterSub(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMul(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant([[5]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
def testEagerPickle(self):
with context.eager_mode():
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, "var.pickle")
with open(fname, "wb") as f:
v = resource_variable_ops.ResourceVariable(
10.0,
dtype=dtypes.float16,
name="v")
pickle.dump(v, f)
with open(fname, "rb") as f:
new_v = pickle.load(f)
self.assertEqual(new_v.name, v.name)
self.assertEqual(new_v.shape, v.shape)
self.assertEqual(new_v.dtype, v.dtype)
self.assertEqual(new_v.trainable, v.trainable)
self.assertAllEqual(new_v.numpy(), v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterDiv(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
def testUseResource(self):
v = variables.VariableV1(1.0, use_resource=True)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
def testEagerNoUseResource(self):
with context.eager_mode():
v = variables.Variable(1.0)
self.assertIsInstance(v, resource_variable_ops.ResourceVariable)
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[[6]],
dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(handle, [0],
constant_op.constant(
[[3]],
dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
def testMetagraph(self):
with ops.Graph().as_default():
with variable_scope.variable_scope("foo", use_resource=True):
a = variable_scope.get_variable("a", initializer=10.0)
momentum.MomentumOptimizer(
learning_rate=0.001, momentum=0.1).minimize(
a,
colocate_gradients_with_ops=True,
global_step=training_util.get_or_create_global_step())
graph = ops.get_default_graph()
meta_graph_def = saver.export_meta_graph(graph=graph)
with ops.Graph().as_default():
saver.import_meta_graph(meta_graph_def, import_scope="")
meta_graph_two = saver.export_meta_graph(graph=graph)
self.assertEqual(meta_graph_def, meta_graph_two)
@test_util.run_in_graph_and_eager_modes
def testScatterMax(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_add(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterSubScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_sub(
handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[-1]])
@test_util.run_in_graph_and_eager_modes
def testScatterMulScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[1]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_mul(
handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[5]])
@test_util.run_in_graph_and_eager_modes
def testScatterDivScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_div(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
@test_util.run_in_graph_and_eager_modes
def testScatterMinScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_min(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[3]])
@test_util.run_in_graph_and_eager_modes
def testScatterMaxScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.int32, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(
handle, constant_op.constant([[6]], dtype=dtypes.int32)))
self.evaluate(
resource_variable_ops.resource_scatter_max(
handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[6]])
@test_util.run_in_graph_and_eager_modes
def testScatterAddVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.5], name="add")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_add(ops.IndexedSlices(indices=[1], values=[2.5])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterSubVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 2.5], name="sub")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_sub(ops.IndexedSlices(indices=[1], values=[1.5])))
self.assertAllEqual([0.0, 1.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMaxVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="max1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 5.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="max2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_max(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.5], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMinVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="min1")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[5.0])))
self.assertAllEqual([0.0, 4.0], self.evaluate(v))
v = resource_variable_ops.ResourceVariable([0.0, 3.5], name="min2")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_min(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 2.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterMulVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 4.0], name="mul")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_mul(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 12.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterDivVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="div")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_div(ops.IndexedSlices(indices=[1], values=[2.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateVariableMethod(self):
v = resource_variable_ops.ResourceVariable([0.0, 6.0], name="update")
self.evaluate(variables.global_variables_initializer())
self.evaluate(
v.scatter_update(ops.IndexedSlices(indices=[1], values=[3.0])))
self.assertAllEqual([0.0, 3.0], self.evaluate(v))
@test_util.run_deprecated_v1
def testScatterUpdateString(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(resource_variable_ops.assign_variable_op(
handle, constant_op.constant([["a"]], dtype=dtypes.string)))
self.evaluate(resource_variable_ops.resource_scatter_update(
handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
compat.as_bytes("b"))
@test_util.run_deprecated_v1
def testScatterUpdateStringScalar(self):
handle = resource_variable_ops.var_handle_op(
dtype=dtypes.string, shape=[1, 1])
self.evaluate(
resource_variable_ops.assign_variable_op(handle,
constant_op.constant(
[["a"]],
dtype=dtypes.string)))
self.evaluate(
resource_variable_ops.resource_scatter_update(handle, [0],
constant_op.constant(
"b",
dtype=dtypes.string)))
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
self.assertEqual(
compat.as_bytes(self.evaluate(read)[0][0]), compat.as_bytes("b"))
# TODO(alive): get this to work in Eager mode.
def testGPU(self):
with test_util.use_gpu():
abc = variable_scope.get_variable(
"abc",
shape=[1],
initializer=init_ops.ones_initializer(),
use_resource=True)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(
resource_variable_ops.var_is_initialized_op(abc.handle)),
True)
def testScatterBool(self):
with context.eager_mode():
ref = resource_variable_ops.ResourceVariable(
[False, True, False], trainable=False)
indices = math_ops.range(3)
updates = constant_op.constant([True, True, True])
state_ops.scatter_update(ref, indices, updates)
self.assertAllEqual(ref.read_value(), [True, True, True])
@test_util.run_in_graph_and_eager_modes
def testConstraintArg(self):
constraint = lambda x: x
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var0")
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, constraint=constraint, name="var1")
# TODO(alive): how should this work in Eager mode?
@test_util.run_deprecated_v1
def testInitFn(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32)
self.assertEqual(v.handle.op.colocation_groups(),
v.initializer.inputs[1].op.colocation_groups())
def testCountUpTo(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(v.count_up_to(1), 0)
with self.assertRaises(errors.OutOfRangeError):
v.count_up_to(1)
def testCountUpToFunction(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(0, name="upto")
self.assertAllEqual(state_ops.count_up_to(v, 1), 0)
with self.assertRaises(errors.OutOfRangeError):
state_ops.count_up_to(v, 1)
@test_util.run_in_graph_and_eager_modes
def testInitFnDtype(self):
v = resource_variable_ops.ResourceVariable(
initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
self.assertEqual(dtypes.float32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitFnNoDtype(self):
v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="var2")
self.assertEqual(dtypes.int32, v.value().dtype)
@test_util.run_in_graph_and_eager_modes
def testInitializeAllVariables(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.float32,
name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testOperatorOverload(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(2.0, self.evaluate(v + v))
@test_util.run_in_graph_and_eager_modes
def testAssignMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign(2.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign(3.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign(4.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testLoad(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
v.load(2.0)
self.assertEqual(2.0, self.evaluate(v.value()))
def testShapePassedToGradient(self):
with ops.Graph().as_default():
@custom_gradient.custom_gradient
def differentiable_scatter_update(handle, indices, values):
with ops.control_dependencies([
resource_variable_ops.resource_scatter_update(
handle, indices, values)]):
new_handle = array_ops.identity(handle)
def grad(dresult):
self.assertIsNotNone(
tensor_util.constant_value(dresult.dense_shape))
return [dresult, None, None]
return new_handle, grad
var = variable_scope.get_variable(
"foo", shape=[20], initializer=init_ops.zeros_initializer,
dtype=dtypes.float64, use_resource=True)
indices = math_ops.range(10)
updates = math_ops.range(9, -1, -1, dtype=dtypes.float64)
new_handle = differentiable_scatter_update(var.handle, indices, updates)
gathered = resource_variable_ops.resource_gather(
new_handle, indices, dtype=var.dtype)
gradients_impl.gradients([gathered], [updates])
def testToFromProtoCachedValue(self):
with ops.Graph().as_default():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
v_prime = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertIsNone(getattr(v_prime, "_cached_value", None))
other_v_def = resource_variable_ops.ResourceVariable(
caching_device="cpu:0",
initial_value=constant_op.constant(3.0)).to_proto()
other_v_prime = resource_variable_ops.ResourceVariable(
variable_def=other_v_def)
self.assertIsNotNone(other_v_prime._cached_value)
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session():
v_def = resource_variable_ops.ResourceVariable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session():
# v describes a VariableDef-based variable without an initial value.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, v.initialized_value().eval())
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session():
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = resource_variable_ops.ResourceVariable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = resource_variable_ops.ResourceVariable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
resource_variable_ops.ResourceVariable(
variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = resource_variable_ops.ResourceVariable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
resource_variable_ops.ResourceVariable(
variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_in_graph_and_eager_modes
def testSparseRead(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value = self.evaluate(v.sparse_read([0, 3, 1, 2]))
self.assertAllEqual(init_value[[0, 3, 1, 2], ...], value)
@test_util.run_in_graph_and_eager_modes
def testGatherNd(self):
init_value = np.reshape(np.arange(np.power(4, 3)), (4, 4, 4))
v = resource_variable_ops.ResourceVariable(
constant_op.constant(init_value, dtype=dtypes.int32), name="var3")
self.evaluate(variables.global_variables_initializer())
value_op = v.gather_nd([[0, 0], [1, 2], [3, 3]])
self.assertAllEqual([3, 4], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([[0, 1, 2, 3], [24, 25, 26, 27], [60, 61, 62, 63]],
value)
value_op = v.gather_nd([[0, 0, 0], [1, 2, 3], [3, 3, 3]])
self.assertAllEqual([3], value_op.shape)
value = self.evaluate(value_op)
self.assertAllEqual([0, 27, 63], value)
@test_util.run_deprecated_v1
def testToFromProto(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertEqual(2, math_ops.add(w, 1).eval())
self.assertEqual(v._handle, w._handle)
self.assertEqual(v._graph_element, w._graph_element)
@test_util.run_in_graph_and_eager_modes
def testAssignAddMethod(self):
v = resource_variable_ops.ResourceVariable(1.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_add(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_add(1.0, read_value=True)
self.assertEqual(3.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_add(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(4.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
def testAssignSubMethod(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.evaluate(v.assign_sub(1.0))
self.assertEqual(2.0, self.evaluate(v.value()))
# Tests for the 'read_value' argument:
assign_with_read = v.assign_sub(1.0, read_value=True)
self.assertEqual(1.0, self.evaluate(assign_with_read))
assign_without_read = v.assign_sub(1.0, read_value=False)
if context.executing_eagerly():
self.assertIsNone(assign_without_read)
else:
self.assertIsInstance(assign_without_read, ops.Operation)
self.evaluate(assign_without_read)
self.assertEqual(0.0, self.evaluate(v.value()))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testDestroyResource(self):
v = resource_variable_ops.ResourceVariable(3.0, name="var0")
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3.0, self.evaluate(v.value()))
self.evaluate(resource_variable_ops.destroy_resource_op(v.handle))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(v.value())
# Handle to a resource not actually created.
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
# Should raise no exception
self.evaluate(resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True))
@test_util.run_deprecated_v1
def testAssignDifferentShapes(self):
with self.cached_session() as sess, variable_scope.variable_scope(
"foo", use_resource=True):
var = variable_scope.get_variable("x", shape=[1, 1], dtype=dtypes.float32)
placeholder = array_ops.placeholder(dtypes.float32)
assign = var.assign(placeholder)
sess.run(
[assign],
feed_dict={placeholder: np.zeros(shape=[2, 2], dtype=np.float32)})
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
with variable_scope.variable_scope("foo"):
var = variable_scope.get_variable("x", shape=[1, 1],
dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError,
"Shapes.*and.*are incompatible"):
assign = var.assign(np.zeros(shape=[2, 2]))
self.evaluate(assign)
@test_util.disable_xla("XLA doesn't allow changing shape at assignment, as "
"dictated by tf2xla/xla_resource.cc:SetTypeAndShape")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.run_in_graph_and_eager_modes
def testInitValueWrongShape(self):
with self.assertRaisesWithPredicateMatch(
ValueError, r"not compatible with"):
var = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(shape=[3]),
shape=[4])
self.evaluate(variables.global_variables_initializer())
self.evaluate(var.read_value())
@test_util.run_deprecated_v1
def testDtypeAfterFromProto(self):
v = resource_variable_ops.ResourceVariable(2.0)
w = resource_variable_ops.ResourceVariable.from_proto(v.to_proto())
self.assertIsInstance(w.dtype, dtypes.DType)
self.assertEqual(v.dtype, w.dtype)
# TODO(alive): get caching to work in eager mode.
@test_util.run_deprecated_v1
def testCachingDevice(self):
with ops.device("/job:server/task:1"):
v = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", v.value().device)
with self.assertRaises(ValueError):
_ = v.value().op.get_attr("_class")
with ops.colocate_with(v.op):
w = resource_variable_ops.ResourceVariable(
2.0, caching_device="/job:localhost")
self.assertEqual("/job:localhost", w.value().device)
with self.assertRaises(ValueError):
_ = w.value().op.get_attr("_class")
@test_util.run_deprecated_v1
def testSharedName(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(300.0, name="var4")
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var4",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
x = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="var5",
container=ops.get_default_graph()._container)
with self.assertRaisesOpError(
"(Resource .*/var5/.* does not exist|Read of uninitialized variable)"
):
resource_variable_ops.read_variable_op(x, v.dtype.base_dtype).eval()
@test_util.run_deprecated_v1
def testSharedNameWithNamescope(self):
with self.cached_session():
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(300.0, name="var6")
self.assertEqual("foo/var6", v._shared_name) # pylint: disable=protected-access
self.assertEqual("foo/var6:0", v.name)
self.evaluate(variables.global_variables_initializer())
w = resource_variable_ops.var_handle_op(
dtype=v.dtype.base_dtype, shape=v.get_shape(), shared_name="foo/var6",
# Needed in Eager since we get a unique container name by default.
container=ops.get_default_graph()._container)
w_read = resource_variable_ops.read_variable_op(w, v.dtype.base_dtype)
self.assertEqual(300.0, self.evaluate(w_read))
@test_util.run_in_graph_and_eager_modes
def testShape(self):
v = resource_variable_ops.ResourceVariable(
name="var4", initial_value=array_ops.ones(shape=[10, 20, 35]))
self.assertEqual("(10, 20, 35)", str(v.shape))
self.assertEqual("(10, 20, 35)", str(v.get_shape()))
self.assertEqual("(10, 20, 35)", str(v.value().shape))
self.assertEqual("(3, 20, 35)", str(v.sparse_read([0, 1, 2]).shape))
if not context.executing_eagerly():
self.assertEqual(
"<unknown>",
str(v.sparse_read(array_ops.placeholder(dtypes.int32)).shape))
@test_util.run_deprecated_v1
def testSetInitialValue(self):
with self.cached_session():
# Initialize variable with a value different from the initial value passed
# in the constructor.
v = resource_variable_ops.ResourceVariable(2.0)
v.initializer.run(feed_dict={v.initial_value: 3.0})
self.assertEqual(3.0, v.value().eval())
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = resource_variable_ops.ResourceVariable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "initializer"):
control_flow_ops.while_loop(cond, body, [0, 0])
def testVariableEager(self):
with context.eager_mode():
init = array_ops.ones(shape=[10, 20, 35], dtype=dtypes.int32)
constraint = lambda x: x
with ops.name_scope("foo"):
v = resource_variable_ops.ResourceVariable(
name="var7",
initial_value=init,
caching_device="cpu:0",
constraint=constraint)
# Test properties
self.assertEqual(dtypes.int32, v.dtype)
self.assertEqual("foo/var7:0", v.name)
self.assertAllEqual([10, 20, 35], v.shape.as_list())
self.assertIsInstance(v.handle, ops.EagerTensor)
self.assertEqual(constraint, v.constraint)
self.assertAllEqual(init.numpy(), v.read_value().numpy())
self.assertAllEqual(init.numpy(), v.value().numpy())
# Callable init.
callable_init = lambda: init * 2
v2 = resource_variable_ops.ResourceVariable(
initial_value=callable_init, name="var7")
self.assertEqual("var7:0", v2.name)
self.assertAllEqual(2 * init.numpy(), v2.read_value().numpy())
# Test assign_add.
new_v2_val = v2.assign_add(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 3, new_v2_val.numpy())
# Test assign_sub.
new_v2_val = v2.assign_sub(v.read_value())
self.assertAllEqual(v.read_value().numpy() * 2, new_v2_val.numpy())
# Test assign.
v2.assign(v.read_value())
self.assertAllEqual(v.read_value().numpy(), v2.read_value().numpy())
# Test load
v2.load(2 * v.read_value())
self.assertAllEqual(2 * v.read_value().numpy(), v2.read_value().numpy())
# Test convert_to_tensor
t = ops.convert_to_tensor(v)
self.assertAllEqual(t.numpy(), v.read_value().numpy())
# Test operations
self.assertAllEqual((v * 2).numpy(), (v + v).numpy())
def testContainerEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
name="same")
with ops.container("different"):
v2 = resource_variable_ops.ResourceVariable(initial_value=lambda: 0,
name="same")
v2.assign(2)
self.assertEqual(1, v1.read_value().numpy())
self.assertEqual(2, v2.read_value().numpy())
def testDestruction(self):
with context.eager_mode():
var = resource_variable_ops.ResourceVariable(initial_value=1.0,
name="var8")
var_handle = var._handle
del var
with self.assertRaisesRegexp(errors.NotFoundError,
r"Resource .* does not exist."):
resource_variable_ops.destroy_resource_op(var_handle,
ignore_lookup_error=False)
def testScatterUpdate(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3.0])
self.assertAllEqual([1.0, 3.0], v.numpy())
def testScatterAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="add")
state_ops.scatter_add(v, [1], [3])
self.assertAllEqual([1.0, 5.0], v.numpy())
def testScatterSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
state_ops.scatter_sub(v, [1], [3])
self.assertAllEqual([1.0, -1.0], v.numpy())
def testScatterUpdateVariant(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([
list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[])
])
v.scatter_update(
ops.IndexedSlices(
list_ops.tensor_list_from_tensor([1., 2.], element_shape=[]), 0))
self.assertAllEqual(
list_ops.tensor_list_get_item(v[0], 0, element_dtype=dtypes.float32),
1.)
def testGroupDoesntForceRead(self):
with ops.Graph().as_default():
v = resource_variable_ops.ResourceVariable(1.0)
assign = v.assign_add(1.0)
g = control_flow_ops.group([assign])
self.assertEqual(g.control_inputs[0].type, "AssignAddVariableOp")
def testScatterNdAddStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="add")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
state_ops.scatter_nd_add(v, indices, updates)
self.assertAllClose(expected, v.numpy())
@test_util.run_in_graph_and_eager_modes
def testUnreadVariableInsideFunction(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def assign():
v.assign(1.0)
graph = assign.get_concrete_function().graph
self.assertTrue(all(x.type != "ReadVariableOp"
for x in graph.get_operations()))
def testScatterNdSubStateOps(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable(
[1, 2, 3, 4, 5, 6, 7, 8], dtype=dtypes.float32, name="sub")
indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
state_ops.scatter_nd_sub(v, indices, updates)
self.assertAllClose(expected, v.numpy())
def testScatterUpdateCast(self):
with context.eager_mode():
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="update")
state_ops.scatter_update(v, [1], [3])
self.assertAllEqual([1.0, 3.0], v.numpy())
@test_util.run_in_graph_and_eager_modes
def testScatterUpdateInvalidArgs(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3], name="update")
# The exact error and message differ between graph construction (where the
# error is realized during shape inference at graph construction time) and
# eager execution (where the error is realized during kernel execution).
with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
state_ops.scatter_update(v, [0, 1], [0, 1, 2])
@test_util.run_in_graph_and_eager_modes
def testAssignIncompatibleShape(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
self.evaluate(v.initializer)
pattern = re.compile("shapes must be equal", re.IGNORECASE)
with self.assertRaisesRegexp(Exception, pattern):
self.evaluate(v.assign_add(1))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testCopyToGraphUninitialized(self):
v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
copy_to_graph = ops.Graph()
with copy_to_graph.as_default(): # Intentionally testing v1 behavior
copied = resource_variable_ops.copy_to_graph_uninitialized(v)
self.assertEqual(v.name, copied.name)
self.assertIsNone(copied.initializer)
def create_variant_shape_and_type_data(self):
variant_shape_and_type_data = (
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData())
variant_shape_and_type_data.is_set = True
stored_shape = tensor_shape.TensorShape([None, 4]).as_proto()
stored_dtype = dtypes.float32.as_datatype_enum
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=stored_shape, dtype=stored_dtype)])
return variant_shape_and_type_data
@def_function.function
def create_constant_variant(self, value):
value = constant_op.constant(
tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto(),
variant_val=[
tensor_pb2.VariantTensorDataProto(
# Match registration in variant_op_registry.cc
type_name=b"int",
metadata=np.array(value, dtype=np.int32).tobytes())
]))
return value
# TODO(ebrevdo): Add run_in_graph_and_eager_modes once we can create
# EagerTensor constants with TensorProto inputs.
@test_util.run_in_graph_and_eager_modes()
def testVariantInitializer(self):
variant_shape_and_type_data = self.create_variant_shape_and_type_data()
value = self.create_constant_variant(3)
initializer = array_ops.fill([3], value)
resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access
initializer, variant_shape_and_type_data,
graph_mode=not context.executing_eagerly())
v = resource_variable_ops.ResourceVariable(initializer)
read = array_ops.identity(v)
read_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(read))
self.assertEqual(
read_variant_shape_and_type, variant_shape_and_type_data)
gather = v.sparse_read([0])
gather_variant_shape_and_type = (
resource_variable_ops.get_eager_safe_handle_data(gather))
self.assertEqual(
gather_variant_shape_and_type, variant_shape_and_type_data)
# Make sure initializer runs.
if not context.executing_eagerly():
self.evaluate(v.initializer)
self.evaluate(read.op)
self.evaluate(gather.op)
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1 (equivalent to
# tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDims(self, params, indices, batch_dims, expected):
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=0,
output_shape=[2, 3, 8, 9, 10, 3, 4, 5, 6, 7]
# = indices.shape + params.shape[1:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=1,
output_shape=[2, 3, 8, 9, 10, 4, 5, 6, 7]
# = params.shape[:1] + indices.shape[1:] + params.shape[2:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 9, 10],
batch_dims=3,
output_shape=[2, 3, 4, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[3:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 4, 5, 10],
batch_dims=4,
output_shape=[2, 3, 4, 5, 10, 7]
# = params.shape[:4] + indices.shape[4:] + params.shape[5:]
),
])
@test_util.run_in_graph_and_eager_modes
def testGatherWithBatchDimsMatchesTensor(self, params_shape, indices_shape,
batch_dims, output_shape):
"""Checks that gather with batch_dims returns the correct shape."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size, dtype=np.int32), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size, dtype=np.int32), indices_shape)
indices = indices % params_shape[batch_dims]
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
expected = array_ops.gather(
var.read_value(), indices, batch_dims=batch_dims)
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=var.dtype, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(dtype=dtypes.bool),
dict(dtype=dtypes.int64),
dict(dtype=dtypes.half),
dict(dtype=dtypes.float32),
dict(dtype=dtypes.double),
])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testGatherWithDTypes(self, dtype):
if dtype == dtypes.bool:
params = constant_op.constant([False, True, False, True])
expected = constant_op.constant([[False, True], [False, True]])
else:
params = constant_op.constant([6, 7, 8, 9], dtype=dtype)
expected = constant_op.constant([[8, 7], [6, 9]], dtype=dtype)
indices = constant_op.constant([[2, 1], [0, 3]])
var = resource_variable_ops.ResourceVariable(params, name="var0")
with ops.control_dependencies([var.initializer]):
result = resource_variable_ops.resource_gather(
var.handle, indices, dtype=dtype)
self.assertAllEqual(expected, result)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "6a619c6331661328615d5ae411b01f08",
"timestamp": "",
"source": "github",
"line_count": 1413,
"max_line_length": 91,
"avg_line_length": 42.29299363057325,
"alnum_prop": 0.6416331994645248,
"repo_name": "chemelnucfin/tensorflow",
"id": "b36b252bd81332a7f35d840be3cab88ab1cc03a9",
"size": "60449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/resource_variable_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""
Provides the ability to run tests on a standalone Django app.
"""
import sys
from optparse import OptionParser
from settings import configure_settings
# Configure the default settings
configure_settings()
# Django nose must be imported here since it depends on the settings being configured
from django_nose import NoseTestSuiteRunner
def run_tests(*test_args, **kwargs):
if not test_args:
test_args = ['issue']
kwargs.setdefault('interactive', False)
test_runner = NoseTestSuiteRunner(**kwargs)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
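# Example invocation (hypothetical test labels; when none are given the runner
# falls back to the 'issue' app):
#   python run_tests.py issue.tests --verbosity=2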
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--verbosity', dest='verbosity', action='store', default=1, type=int)
(options, args) = parser.parse_args()
run_tests(*args, **options.__dict__)
|
{
"content_hash": "f6e7a6b749e0da32a0385092e4e2d30d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 91,
"avg_line_length": 24.470588235294116,
"alnum_prop": 0.7007211538461539,
"repo_name": "wesleykendall/django-issue",
"id": "c41022484ce495b3970b0fa815395a5724a17f8e",
"size": "832",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "run_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52722"
}
],
"symlink_target": ""
}
|
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
None,
gen_nn_ops.conv2d_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()),
gen_nn_ops.conv2d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. See _Conv2DGrad.
return [
gen_nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()), None,
gen_nn_ops.conv2d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
gen_nn_ops.depthwise_conv2d_native_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")),
gen_nn_ops.depthwise_conv2d_native(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterGrad(op, grad):
return [
gen_nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")), None,
gen_nn_ops.depthwise_conv2d_native(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
None,
nn_ops.conv3d_backprop_filter_v2(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format), None,
nn_ops.conv3d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
  We assume that probs is of shape [batch_size, dim].
  The formula for dsoftmax / dx is (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the softmax
output.
Returns:
gradient w.r.t the input to the softmax
"""
softmax = op.outputs[0]
sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
return (grad_softmax - sum_channels) * softmax
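# Illustrative sanity check for _SoftmaxGrad (hypothetical values): with
# softmax = [0.25, 0.75] and grad_softmax = [1.0, 0.0], sum_channels is
# 1.0 * 0.25 + 0.0 * 0.75 = 0.25, so the returned gradient is
# ([1.0, 0.0] - 0.25) * [0.25, 0.75] = [0.1875, -0.1875]. The components sum to
# zero, as expected, since shifting the logits by a constant leaves the softmax
# unchanged.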
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
  log_softmax = input - log(sum(exp(input)))
  d(log_softmax_i) / d(input_j) = delta_ij - softmax(input)_j
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax
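# Illustrative sanity check for _LogSoftmaxGrad (hypothetical values): if the
# forward output corresponds to softmax = [0.25, 0.75] and grad = [1.0, 0.0],
# then reduce_sum(grad) = 1.0 and the result is
# [1.0, 0.0] - 1.0 * [0.25, 0.75] = [0.75, -0.75], which again sums to zero.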
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of op is the tensor t, and its gradient is just the gradient
  that op received. The second input of op is the bias vector, which has a
  single dimension matching the channel dimension of "received_grad". Its
  gradient is the received gradient summed over every other dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad,
gen_nn_ops.bias_add_grad(
out_backprop=received_grad, data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:1]), bias_shape,
array_ops.ones_like(shape[2:])
], 0)
tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
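# Shape sketch for _BiasAddGradGrad (assumed example shapes): for an NHWC input
# of shape [2, 3, 4, 5] with a bias of shape [5], expanded_shape is
# [1, 1, 1, 5] and tile_mults is [2, 3, 4, 1], so the incoming bias gradient is
# broadcast back to the full input shape. For NCHW with input [2, 5, 3, 4],
# expanded_shape is [1, 5, 1, 1] and tile_mults is [2, 1, 3, 4].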
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
  The second input of unused_bias_op is the bias vector, which has a single
  dimension matching the last dimension of "received_grad". Its gradient is the
  received gradient summed over all dimensions except the last one.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops.relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
elu_x = op.inputs[1]
return (gen_nn_ops.elu_grad(grad, elu_x),
array_ops.where(
elu_x < 0, grad * op.inputs[0], array_ops.zeros_like(elu_x)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
selu_x = op.inputs[1]
return (gen_nn_ops.selu_grad(grad, selu_x),
array_ops.where(
selu_x < 0., grad * op.inputs[0], array_ops.zeros_like(selu_x)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops.relu6_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6Grad")
def _Relu6GradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu6_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("LeakyRelu")
def _LeakyReluGrad(op, grad):
x = op.inputs[0]
alpha = op.get_attr("alpha")
return gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha)
@ops.RegisterGradient("LeakyReluGrad")
def _LeakyReluGradGrad(op, grad):
x = op.inputs[1]
alpha = op.get_attr("alpha")
return (gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops.elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
return gen_nn_ops.selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return grad * math_ops.sigmoid(op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
# Let:
# y = tf.nn.softplus(x)
# dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
# This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
dy, x = op.inputs
with ops.control_dependencies([grad]):
ddy = gen_nn_ops.softplus_grad(grad, x)
d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
return (ddy, d2x)
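# Informal derivation for _SoftplusGradGrad: since
#   d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
#                   = 1 / ((1 + exp(-x)) * (1 + exp(x)))
#                   = 1 / (exp(-x) + 2 + exp(x)),
# the second-order term d2x = grad * dy * d/dx sigmoid(x) matches the
# expression grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x)) above.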
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops.softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
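# Illustrative example for _BroadcastMul (hypothetical values): with
# vec = [2., 3.] (shape [2]) and mat = [[1., 1., 1.], [1., 1., 1.]] (shape
# [2, 3]), vec is reshaped to [[2.], [3.]] and the result is
# [[2., 2., 2.], [3., 3., 3.]].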
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
logits = op.inputs[0]
if (grad_grad is not None and
not getattr(grad_grad, "_is_zeros_tensor", False)):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
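# Note on the second-order term in _SoftmaxCrossEntropyWithLogitsGrad above:
# (grad_grad - <grad_grad, softmax>) * softmax is the softmax Jacobian
# J = diag(softmax) - softmax * softmax^T applied to grad_grad for each
# example, computed with a batched matmul plus squeeze instead of
# materializing J explicitly.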
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
# There is no gradient for the labels
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
logits = op.inputs[0]
if (grad_grad is not None and
not getattr(grad_grad, "_is_zeros_tensor", False)):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
"""Gradient function for Conv2D."""
dilations = op.get_attr("dilations")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
explicit_paddings = op.get_attr("explicit_paddings")
use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
data_format = op.get_attr("data_format")
shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. gen_nn_ops functions take an
  # `explicit_paddings` parameter, but nn_ops functions do not. So if we were
# to use the nn_ops functions, we would have to convert `padding` and
# `explicit_paddings` into a single `padding` parameter, increasing overhead
# in Eager mode.
return [
gen_nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format),
gen_nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
gen_nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format")),
gen_nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [
nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [
gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias,
alpha, beta)
]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops.avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops.max_pool_grad(
op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
explicit_paddings=op.get_attr("explicit_paddings"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
ksize = op.inputs[1]
strides = op.inputs[2]
return gen_nn_ops.max_pool_grad_v2(
op.inputs[0],
op.outputs[0],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
del unused_argmax_grad
return gen_nn_ops.max_pool_grad_with_argmax(
op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
include_batch_in_index=op.get_attr("include_batch_in_index"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
ksize = op.inputs[3]
strides = op.inputs[4]
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad_v2(
op.inputs[0],
op.inputs[1],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
  Since FractionalMaxPool has three outputs, three gradients are passed in, one
  for each output. Only the first one is useful; the other two gradients are
  empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
return gen_nn_ops.fractional_max_pool_grad(
op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
  Since FractionalAvgPool has three outputs, three gradients are passed in, one
  for each output. Only the first one is useful; the other two gradients are
  empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
def _BaseFusedBatchNormGrad(op, version, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
version: Integer indicating which version to use of the fused batch
norm gradient.
*grad: An argument list for tensors of gradients wrt the outputs
with grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
in freeze mode.
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon)) in training mode;
sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
in freeze mode.
    grad_offset: gradient for offset, which is sum(grad_y) in both training
                 and freeze modes.
"""
x = op.inputs[0]
grad_y = grad[0]
scale = op.inputs[1]
epsilon = op.get_attr("epsilon")
data_format = op.get_attr("data_format")
is_training = op.get_attr("is_training")
if version == 2:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v3
elif version == 1:
grad_fun = gen_nn_ops.fused_batch_norm_grad_v2
else:
grad_fun = gen_nn_ops.fused_batch_norm_grad
if is_training:
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": op.outputs[3],
"reserve_space_2": op.outputs[4],
"epsilon": epsilon,
"data_format": data_format,
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
dx, dscale, doffset, _, _ = grad_fun(**args)
else:
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
if data_format == b"NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
elif data_format == b"NCDHW":
x = array_ops.transpose(x, [0, 2, 3, 4, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 4, 1])
target_data_format = ("NHWC" if data_format in (b"NCHW",
b"NHWC") else "NDHWC")
args = {
"y_backprop": grad_y,
"x": x,
"scale": scale,
"reserve_space_1": pop_mean,
"reserve_space_2": pop_var,
"epsilon": epsilon,
"data_format": target_data_format,
"is_training": is_training
}
if version == 2:
args["reserve_space_3"] = op.outputs[5]
dx, dscale, doffset, _, _ = grad_fun(**args)
if data_format == b"NCHW":
dx = array_ops.transpose(dx, [0, 3, 1, 2])
elif data_format == b"NCDHW":
dx = array_ops.transpose(dx, [0, 4, 1, 2, 3])
return dx, dscale, doffset, None, None
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
return _BaseFusedBatchNormGrad(op, 0, *grad)
@ops.RegisterGradient("FusedBatchNormV2")
def _FusedBatchNormV2Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 1, *grad)
@ops.RegisterGradient("FusedBatchNormV3")
def _FusedBatchNormV3Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, 2, *grad)
def _BatchNormGrad(grad_y,
x,
scale,
pop_mean,
pop_var,
epsilon,
data_format,
is_training=True):
"""Returns the gradients for the 3 inputs of BatchNorm.
Args:
    grad_y: A `Tensor` of 4 or 5 dimensions for the gradient w.r.t. y.
x: A `Tensor` of 4 or 5 dimensions for x.
scale: A `Tensor` of 1 dimension for scaling.
pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
is_training=False.
pop_var: A `Tensor` of 1 dimension for the population variance. Only used
when is_training=False.
epsilon: A small float number added to the variance of x.
    data_format: The data format for input. One of b"NHWC", b"NCHW", b"NDHWC"
      or b"NCDHW".
is_training: A bool value to indicate the operation is for training
(default) or inference.
Returns:
A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
for x, grad_scale the gradient for scale, and grad_offset the gradient
for offset.
"""
x_dtype = x.dtype.base_dtype
if x_dtype == dtypes.float16:
# float16 math is too imprecise, so we do the batch norm gradient
# computations in float32.
x = math_ops.cast(x, dtypes.float32)
grad_y = math_ops.cast(grad_y, dtypes.float32)
if is_training:
if data_format == b"NHWC":
keepdims = False
reduce_axis = [0, 1, 2]
elif data_format == b"NDHWC":
keepdims = False
reduce_axis = [0, 1, 2, 3]
elif data_format == b"NCHW":
keepdims = True
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(scale), 1, 1]
scale = array_ops.reshape(scale, shape)
else:
keepdims = True
reduce_axis = [0, 2, 3, 4]
shape = [1, array_ops.size(scale), 1, 1, 1]
scale = array_ops.reshape(scale, shape)
mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
var_x = math_ops.reduce_mean(
math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
reduce_axis,
keepdims=keepdims)
grad_y_offset = grad_y - mean_grad_y
x_offset = x - mean_x
mean = math_ops.reduce_mean(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
if data_format == b"NCHW" or data_format == b"NCDHW":
grad_scale = array_ops.squeeze(grad_scale)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
else:
if data_format == b"NHWC":
reduce_axis = [0, 1, 2]
elif data_format == b"NDHWC":
reduce_axis = [0, 1, 2, 3]
elif data_format == b"NCHW":
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(pop_mean), 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
else:
reduce_axis = [0, 2, 3, 4]
shape = [1, array_ops.size(pop_mean), 1, 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
grad_scale = math_ops.reduce_sum(
grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
grad_x = grad_y * scale * var_rsqrt
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
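# Quick check of the inference branch of _BatchNormGrad (hypothetical values):
# with scale = 1, pop_var = 3 and epsilon = 1, var_rsqrt = rsqrt(4) = 0.5, so
# grad_x = 0.5 * grad_y, grad_scale = 0.5 * sum(grad_y * (x - pop_mean)) and
# grad_offset = sum(grad_y), matching the freeze-mode formulas documented in
# _BaseFusedBatchNormGrad.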
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
"""Returns the gradients for the 3 inputs of FusedBatchNormGrad.
Args:
op: The FusedBatchNormGradOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs with
grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as
grad_grad_offset.
Returns:
A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
is the gradient for grad_y, grad_x the gradient for x, grad_scale the
gradient for scale.
"""
data_format = op.get_attr("data_format")
epsilon = op.get_attr("epsilon")
is_training = op.get_attr("is_training")
grad_y = op.inputs[0]
x = op.inputs[1]
scale = op.inputs[2]
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
grad_grad_x = grad[0]
grad_grad_scale = grad[1]
grad_grad_offset = grad[2]
with backprop.GradientTape() as tape:
tape.watch(grad_y)
tape.watch(x)
tape.watch(scale)
grad_x, grad_scale, grad_offset = _BatchNormGrad(
grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
grad_grad_y, grad_x, grad_scale = tape.gradient(
[grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("FusedBatchNormGradV2")
def _FusedBatchNormGradGradV2(op, *grad):
return _FusedBatchNormGradGrad(op, *grad)
@ops.RegisterGradient("FusedBatchNormGradV3")
def _FusedBatchNormGradGradV3(op, *grad):
grad_grad_y, grad_x, grad_scale, _, _ = _FusedBatchNormGradGrad(op, *grad)
return grad_grad_y, grad_x, grad_scale, None, None, None
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
    A list of two tensors, the first being the gradient w.r.t. the input to
    TopK, and the second being the gradient w.r.t. the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
# int32 is not supported on GPU hence up-casting
ind_lastdim = array_ops.gather(
math_ops.cast(ind_shape, dtypes.int64),
array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(
math_ops.cast(in_shape, dtypes.int64),
array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(
ind_2d + math_ops.cast(
array_ops.expand_dims(
math_ops.range(0,
math_ops.cast(outerdim, dtypes.int64) * in_lastdim,
in_lastdim), -1), dtypes.int32), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [
array_ops.reshape(
array_ops.scatter_nd(
array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]),
[math_ops.reduce_prod(in_shape)]), in_shape),
array_ops.zeros([], dtype=dtypes.int32)
]
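# Worked example for _TopKGrad (hypothetical values): for an input of shape
# [2, 4] and k = 2 with indices op.outputs[1] = [[3, 1], [0, 2]], in_lastdim is
# 4 and outerdim is 2, so the per-row offsets are [[0], [4]] and the flattened
# linear indices are [3, 1, 4, 6]. The incoming grad values are scattered into
# a zero vector of 8 elements at those positions and reshaped back to [2, 4].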
@ops.RegisterGradient("NthElement")
def _NthElementGrad(op, grad):
"""Return the gradients for NthElement.
Args:
op: The NthElementOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the NthElementOp
Returns:
A list of two tensors, the first being the gradient w.r.t. the input,
the second being the gradient w.r.t. the N (None).
"""
input = op.inputs[0] # pylint: disable=redefined-builtin
output = op.outputs[0]
  # Compute the number of elements which are equal to the output in each
  # reduction dimension. If there are multiple such elements then the gradient
  # will be divided evenly between them.
indicators = math_ops.cast(
math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
grad = array_ops.expand_dims(grad, -1)
num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
return [math_ops.divide(indicators, num_selected) * grad, None]
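# Illustrative example for _NthElementGrad (hypothetical values): if a row of
# the input is [3, 1, 3, 2] and the selected n-th element is 3, indicators is
# [1, 0, 1, 0] and num_selected is 2, so each of the two tied entries receives
# half of the incoming gradient.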
def _MeanAggregator(inputs, segments):
"""Replaces each segment with its mean along the last axis.
Specifically, each value in the `inputs` tensor gets replaced by the mean
value computed from the values that belong to the same segment.
Args:
inputs: A 2-tensor. Aggregation is done over dimension 1.
    segments: A 2-tensor, same shape as `inputs`.
Returns:
The result, same shape and type as `inputs`.
"""
result = []
for inputs_i, segments_i in zip(
array_ops.split(inputs, inputs.shape[0]),
array_ops.split(segments, segments.shape[0])):
# Note that we do not use tf.math.segment_mean, as it has no TPU support.
means_i = math_ops.unsorted_segment_mean(
inputs_i, segments_i, num_segments=math_ops.reduce_max(segments_i) + 1)
result.append(
array_ops.reshape(array_ops.gather(means_i, segments_i), [-1]))
return array_ops.stack(result, axis=0)
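# Illustrative example for _MeanAggregator (hypothetical values): for a single
# row with inputs = [1., 2., 3., 4.] and segments = [0, 0, 1, 1], the segment
# means are [1.5, 3.5] and the row is replaced by [1.5, 1.5, 3.5, 3.5].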
# We have to register the gradients for these ops so that tensorflow will know
# how to differentiate them.
@ops.RegisterGradient("IsotonicRegression")
def _IsotonicRegressionGrad(op, grad_output, grad_segments):
"""Gradient for the isotonic regression function.
Args:
op: The IsotonicRegression tensorflow op.
grad_output: Tensor of incoming gradients with respect to the output.
grad_segments: Tensor of incoming gradients with respect to the segments.
Returns:
A tensor, same size as `grad_output` with the gradient with respect to
the input.
"""
del grad_segments # Discrete, non-differentiable.
segments = op.outputs[1]
return _MeanAggregator(grad_output, segments)
|
{
"content_hash": "b537bba03a747515d4bc1d2bb6a69e66",
"timestamp": "",
"source": "github",
"line_count": 1199,
"max_line_length": 80,
"avg_line_length": 33.98415346121768,
"alnum_prop": 0.6332981569195278,
"repo_name": "aam-at/tensorflow",
"id": "a02e31f80a5e82fdc10bc55f1aab1022220eba47",
"size": "41436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/nn_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
import datetime
import sqlite3
from os import getcwd, startfile
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
from misc_files import common_vars
from settings import settings_window
from video_entry import entry_screen
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
# SQLite connections
settings_conn = sqlite3.connect(common_vars.settings_db())
settings_cursor = settings_conn.cursor()
video_db_conn = sqlite3.connect(common_vars.video_db())
video_db_cursor = video_db_conn.cursor()
# Misc variables
leftWidth = 270
rightWidth = 270
settings_cursor.execute('SELECT path_to_db FROM db_settings')
currentWorkingDB = settings_cursor.fetchone()[0]
# Layout initialization
self.vLayoutMaster = QtWidgets.QVBoxLayout()
self.hLayoutTopBar = QtWidgets.QHBoxLayout()
self.hLayoutTopBar_L = QtWidgets.QHBoxLayout()
self.hLayoutTopBar_L.setAlignment(QtCore.Qt.AlignLeft)
self.hLayoutTopBar_Ctr = QtWidgets.QHBoxLayout()
self.hLayoutTopBar_R = QtWidgets.QHBoxLayout()
self.hLayoutTopBar_R.setAlignment(QtCore.Qt.AlignRight)
self.hLayoutCenter = QtWidgets.QHBoxLayout()
self.vLayoutLeftBar = QtWidgets.QVBoxLayout()
self.gridRightBar = QtWidgets.QGridLayout()
# Top bar - L
self.boldFont = QtGui.QFont()
self.boldFont.setBold(True)
self.boldFont.setPixelSize(20)
self.addVideoBtn = QtWidgets.QPushButton('+')
self.addVideoBtn.setFont(self.boldFont)
self.addVideoBtn.setFixedSize(40, 40)
self.addVideoBtn.setToolTip('Add new video to database')
self.custListIcon = QtGui.QIcon(getcwd() + '/icons/cl-icon.png')
self.custListBtn = QtWidgets.QPushButton()
self.custListBtn.setIcon(self.custListIcon)
self.custListBtn.setFixedSize(40, 40)
self.custListBtn.setToolTip('Manage custom lists')
# Top bar - R
self.settingsIcon = QtGui.QIcon(getcwd() + '/icons/settings-icon.png')
self.settingsBtn = QtWidgets.QPushButton()
self.settingsBtn.setFixedSize(40, 40)
self.settingsBtn.setIcon(self.settingsIcon)
self.settingsBtn.setToolTip('AMV Tracker settings')
self.statsIcon = QtGui.QIcon(getcwd() + '/icons/stats-icon.png')
self.statsBtn = QtWidgets.QPushButton()
self.statsBtn.setFixedSize(40, 40)
self.statsBtn.setIcon(self.statsIcon)
self.statsBtn.setToolTip('Database stats and analytics')
self.updateBtn = QtWidgets.QPushButton(u'\u2191')
self.updateBtn.setFont(self.boldFont)
self.updateBtn.setFixedSize(40, 40)
self.updateBtn.setToolTip('Check for update')
# Mid: left bar
self.scrollWidget_L = QtWidgets.QWidget()
self.scrollArea_L = QtWidgets.QScrollArea()
self.scrollArea_L.setFixedWidth(leftWidth)
self.largeFont = QtGui.QFont()
self.largeFont.setPixelSize(14)
self.subDBLabel = QtWidgets.QLabel()
self.subDBLabel.setText('Sub-DB:')
self.subDBLabel.setFont(self.largeFont)
self.subDBList = [k for k, v in common_vars.sub_db_lookup().items()]
self.subDBDrop = QtWidgets.QComboBox()
self.subDBDrop.setFont(self.largeFont)
for subdb in self.subDBList:
self.subDBDrop.addItem(subdb)
self.basicFiltersLabel = QtWidgets.QLabel()
self.basicFiltersLabel.setText('Filter by:')
self.basicFiltersLabel.setFont(self.largeFont)
self.basicFiltersList = ['Studio', 'Year released', 'Star rating', 'Video footage', 'Song artist', 'Song genre',
'Video length', 'My rating', 'Notable videos', 'Favorited videos',
'Date added to database', 'Custom list', 'Editor username']
self.basicFiltersList.sort()
self.basicFiltersList.insert(0, 'Show all')
self.basicFiltersDrop = QtWidgets.QComboBox()
for item in self.basicFiltersList:
self.basicFiltersDrop.addItem(item)
self.basicFiltersDrop.setFixedWidth(230)
self.basicFiltersDrop.setFont(self.largeFont)
self.basicFiltersDrop.setMaxVisibleItems(15)
self.basicFilterListWid = QtWidgets.QListWidget()
self.basicFilterListWid.setFixedSize(230, 700)
self.vLayoutLeftBar.addWidget(self.subDBLabel)
self.vLayoutLeftBar.addWidget(self.subDBDrop)
self.vLayoutLeftBar.addSpacing(15)
self.vLayoutLeftBar.addWidget(self.basicFiltersLabel)
self.vLayoutLeftBar.addWidget(self.basicFiltersDrop)
self.vLayoutLeftBar.addWidget(self.basicFilterListWid)
# Mid: center
self.searchTable = QtWidgets.QTableWidget()
self.init_table()
# Mid: right bar
self.scrollWidget_R = QtWidgets.QWidget()
self.scrollArea_R = QtWidgets.QScrollArea()
self.scrollArea_R.setFixedWidth(rightWidth)
# Bottom bar
self.cwdLabel = QtWidgets.QLabel()
self.cwdLabel.setText('Current working database: ' + currentWorkingDB)
# Top layout size restriction
self.leftWidget = QtWidgets.QWidget()
self.leftWidget.setLayout(self.hLayoutTopBar_L)
self.leftWidget.setFixedWidth(leftWidth)
self.rightWidget = QtWidgets.QWidget()
self.rightWidget.setLayout(self.hLayoutTopBar_R)
self.rightWidget.setFixedWidth(rightWidth)
# Set layouts
self.hLayoutTopBar_L.addWidget(self.addVideoBtn, alignment=QtCore.Qt.AlignLeft)
self.hLayoutTopBar_L.addWidget(self.custListBtn, alignment=QtCore.Qt.AlignLeft)
self.hLayoutTopBar_R.addWidget(self.settingsBtn, alignment=QtCore.Qt.AlignRight)
self.hLayoutTopBar_R.addWidget(self.statsBtn, alignment=QtCore.Qt.AlignRight)
self.hLayoutTopBar_R.addWidget(self.updateBtn, alignment=QtCore.Qt.AlignRight)
self.hLayoutTopBar.addWidget(self.leftWidget, alignment=QtCore.Qt.AlignLeft)
self.hLayoutTopBar.addLayout(self.hLayoutTopBar_Ctr)
self.hLayoutTopBar.addWidget(self.rightWidget, alignment=QtCore.Qt.AlignRight)
self.scrollWidget_L.setLayout(self.vLayoutLeftBar)
self.scrollArea_L.setWidget(self.scrollWidget_L)
self.scrollWidget_R.setLayout(self.gridRightBar)
self.scrollArea_R.setWidget(self.scrollWidget_R)
self.hLayoutCenter.addWidget(self.scrollArea_L, alignment=QtCore.Qt.AlignLeft)
self.hLayoutCenter.addWidget(self.searchTable)
self.hLayoutCenter.addWidget(self.scrollArea_R, alignment=QtCore.Qt.AlignRight)
self.vLayoutMaster.addLayout(self.hLayoutTopBar)
self.vLayoutMaster.addLayout(self.hLayoutCenter)
self.vLayoutMaster.addWidget(self.cwdLabel, alignment=QtCore.Qt.AlignRight)
# Populate table
self.basic_filter_selected()
# Signals / slots
self.addVideoBtn.clicked.connect(self.add_video_pushed)
self.settingsBtn.clicked.connect(self.settings_button_pushed)
self.subDBDrop.currentIndexChanged.connect(self.basic_filter_dropdown_clicked)
self.basicFiltersDrop.currentIndexChanged.connect(self.basic_filter_dropdown_clicked)
self.basicFilterListWid.itemClicked.connect(self.basic_filter_selected)
self.searchTable.cellClicked.connect(lambda: self.table_cell_clicked(
int(self.searchTable.currentRow()), int(self.searchTable.currentColumn()),
self.searchTable.item(self.searchTable.currentRow(), 0).text()))
# Widget
self.mainWid = QtWidgets.QWidget()
self.mainWid.setLayout(self.vLayoutMaster)
self.setCentralWidget(self.mainWid)
self.setWindowTitle('AMV Tracker')
video_db_conn.close()
settings_conn.close()
def add_video_pushed(self):
self.add_video = entry_screen.VideoEntry()
self.add_video.show()
def settings_button_pushed(self):
self.settings_screen = settings_window.SettingsWindow()
self.settings_screen.show()
def init_table(self):
init_tab_sett_conn = sqlite3.connect(common_vars.settings_db())
init_tab_sett_cursor = init_tab_sett_conn.cursor()
init_tab_sett_cursor.execute('SELECT field_name_display, displ_order, col_width FROM search_field_lookup WHERE '
'visible_in_search_view = 1')
field_data = init_tab_sett_cursor.fetchall()
field_data.sort(key=lambda x: int(x[1]))
table_header_dict = {x[0]: x[2] for x in field_data}
table_header_dict['Edit entry'] = 70
table_header_dict['Watch'] = 60
table_header_list = [x[0] for x in field_data]
table_header_list.insert(1, 'Edit entry')
table_header_list.insert(2, 'Watch')
self.searchTable.setColumnCount(len(table_header_list))
self.searchTable.setHorizontalHeaderLabels(table_header_list)
for ind in range(0, len(table_header_list)):
self.searchTable.setColumnWidth(ind, table_header_dict[self.searchTable.horizontalHeaderItem(ind).text()])
self.searchTable.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.searchTable.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.searchTable.setColumnHidden(0, True) # Hide VidID column
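	# Note on the resulting column layout (inferred from the code above): column 0
	# holds the hidden video ID, columns 1 and 2 hold the injected 'Edit entry' and
	# 'Watch' buttons, and the remaining search fields follow in their configured
	# display order -- which is why populate_table() shifts each non-zero
	# displ_order by 2.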
def basic_filter_dropdown_clicked(self):
self.basicFilterListWid.clear()
bf_drop_conn = sqlite3.connect(common_vars.video_db())
bf_drop_cursor = bf_drop_conn.cursor()
bf_drop_sub_db_friendly = self.subDBDrop.currentText()
bf_drop_sub_db_internal = common_vars.sub_db_lookup()[bf_drop_sub_db_friendly]
filter_text = self.basicFiltersDrop.currentText()
if filter_text == 'Show all':
list_wid_pop = []
self.basic_filter_selected()
elif filter_text == 'Custom list':
list_wid_pop = [k for k, v in common_vars.custom_list_lookup().items()]
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Date added to database':
list_wid_pop = ['Today', 'Yesterday', 'Last 7 days', 'Last 30 days', 'Last 60 days', 'Last 90 days',
'Last 6 months', 'Last 12 months', 'Last 24 months']
elif filter_text == 'Editor username':
bf_drop_cursor.execute('SELECT primary_editor_username FROM {}'.format(bf_drop_sub_db_internal))
editors = bf_drop_cursor.fetchall()
list_wid_pop = list(set(y for x in editors for y in x))
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Year released':
bf_drop_cursor.execute('SELECT release_date FROM {}'.format(bf_drop_sub_db_internal))
dates = bf_drop_cursor.fetchall()
list_wid_pop = list(set([y[:4] for x in dates for y in x]))
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort()
list_wid_pop.insert(0, 'Unknown')
list_wid_pop.insert(0, 'Not specified')
elif filter_text == 'Favorited videos':
list_wid_pop = ['Marked as favorite', 'Not marked as favorite']
elif filter_text == 'My rating':
list_wid_pop = [str(rat * 0.5) for rat in range(0, 21)]
list_wid_pop.insert(0, 'Unrated')
elif filter_text == 'Notable videos':
list_wid_pop = ['Marked as notable', 'Not marked as notable']
elif filter_text == 'Song artist':
bf_drop_cursor.execute('SELECT song_artist FROM {}'.format(bf_drop_sub_db_internal))
artists = bf_drop_cursor.fetchall()
list_wid_pop = list(set(y for x in artists for y in x))
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Song genre':
bf_drop_cursor.execute('SELECT song_genre FROM {}'.format(bf_drop_sub_db_internal))
song_genres = bf_drop_cursor.fetchall()
list_wid_pop = list(set(y for x in song_genres for y in x))
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Star rating':
list_wid_pop = ['Unrated or 0.0', '0.50 - 1.99', '2.00 - 2.49', '2.50 - 2.99', '3.00 - 3.49', '3.50 - 3.99',
'4.00 - 4.49', '4.50 - 5.00']
elif filter_text == 'Studio':
bf_drop_cursor.execute('SELECT studio FROM {}'.format(bf_drop_sub_db_internal))
studios = bf_drop_cursor.fetchall()
list_wid_pop = list(set(y for x in studios for y in x))
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Video footage':
list_wid_pop = []
bf_drop_cursor.execute('SELECT video_footage FROM {}'.format(bf_drop_sub_db_internal))
for ftg_tup in bf_drop_cursor.fetchall():
for ftg_grp in list(ftg_tup):
for ftg in ftg_grp.split('; '):
if ftg not in list_wid_pop:
list_wid_pop.append(ftg)
if '' in list_wid_pop:
list_wid_pop.remove('')
list_wid_pop.sort(key=lambda x: x.casefold())
elif filter_text == 'Video length':
list_wid_pop = [str(x * 30) + ' - ' + str(((x + 1) * 30) - 1) + ' sec' for x in range(0, 14)]
list_wid_pop.append('420+ sec')
list_wid_pop.insert(0, 'Not specified')
else:
list_wid_pop = []
for item in list_wid_pop:
self.basicFilterListWid.addItem(item)
bf_drop_conn.close()
def basic_filter_selected(self):
bf_conn = sqlite3.connect(common_vars.video_db())
bf_cursor = bf_conn.cursor()
bf_sel_subdb_friendly = self.subDBDrop.currentText()
bf_sel_subdb_internal = common_vars.sub_db_lookup()[bf_sel_subdb_friendly]
vidids_list = []
output_vidids_list = []
filter_by_text = self.basicFiltersDrop.currentText()
sel_filter = ''
if filter_by_text == 'Show all':
bf_cursor.execute('SELECT video_id FROM {}'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
else:
sel_filter = self.basicFilterListWid.currentItem().text()
if filter_by_text == 'Custom list':
bf_cursor.execute('SELECT vid_ids FROM custom_lists WHERE list_name = ?', (sel_filter,))
output_vidids_list = bf_cursor.fetchall()[0][0].split('; ')
elif filter_by_text == 'Date added to database':
today = datetime.date.today()
bf_cursor.execute('SELECT video_id, date_entered FROM {}'.format(bf_sel_subdb_internal))
for tup in bf_cursor.fetchall():
if tup[1] != '':
ent_date_list = [int(x) for x in tup[1].split('/')]
ent_date = datetime.date(ent_date_list[0], ent_date_list[1], ent_date_list[2])
delta = today - ent_date
vidids_list.append((tup[0], delta.days))
for vid in vidids_list:
if (sel_filter == 'Today' and vid[1] == 0) or \
(sel_filter == 'Yesterday' and vid[1] == 1) or \
(sel_filter == 'Last 7 days' and vid[1] <= 7) or \
(sel_filter == 'Last 30 days' and vid[1] <= 30) or \
(sel_filter == 'Last 60 days' and vid[1] <= 60) or \
(sel_filter == 'Last 90 days' and vid[1] <= 90) or \
(sel_filter == 'Last 6 months' and vid[1] <= 180) or \
(sel_filter == 'Last 12 months' and vid[1] <= 365) or \
(sel_filter == 'Last 24 months' and vid[1] <= 730):
output_vidids_list.append(vid[0])
elif filter_by_text == 'Editor username':
bf_cursor.execute('SELECT video_id FROM {} WHERE primary_editor_username = ? OR '
'primary_editor_pseudonyms LIKE ? OR addl_editors LIKE ?'.format(bf_sel_subdb_internal),
(sel_filter, sel_filter, sel_filter))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Favorited videos':
if sel_filter == 'Marked as favorite':
fav = 1
else:
fav = 0
bf_cursor.execute('SELECT video_id FROM {} WHERE favorite = ?'.format(bf_sel_subdb_internal), (fav,))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'My rating':
if sel_filter == 'Unrated':
mr_inp_text = ''
else:
mr_inp_text = sel_filter
bf_cursor.execute('SELECT video_id FROM {} WHERE my_rating = ?'.format(bf_sel_subdb_internal),
(mr_inp_text,))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Notable videos':
if sel_filter == 'Marked as notable':
notable = 1
else:
notable = 0
bf_cursor.execute('SELECT video_id FROM {} WHERE notable = ?'.format(bf_sel_subdb_internal), (notable,))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Song artist' or filter_by_text == 'Song genre' or filter_by_text == 'Studio':
column_name = filter_by_text.lower().replace(' ', '_')
bf_cursor.execute('SELECT video_id FROM {} WHERE {} = ?'.format(bf_sel_subdb_internal, column_name),
(sel_filter,))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Star rating':
if sel_filter == 'Unrated or 0.0':
bf_cursor.execute('SELECT video_id FROM {} WHERE star_rating = "" or star_rating = 0.0'
.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
else:
star_rat_rng = [float(x) for x in sel_filter.split(' - ')]
bf_cursor.execute('SELECT video_id, star_rating FROM {} WHERE star_rating != ""'
.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
if star_rat_rng[0] <= float(vidid_tup[1]) <= star_rat_rng[1]:
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Video footage':
bf_cursor.execute('SELECT video_id, video_footage FROM {}'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
for ftg in vidid_tup[1].split('; '):
if sel_filter == ftg:
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Video length':
if sel_filter == 'Not specified':
bf_cursor.execute('SELECT video_id FROM {} WHERE video_length = ""'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
else:
bf_cursor.execute('SELECT video_id, video_length FROM {} WHERE video_length != ""'
.format(bf_sel_subdb_internal))
if sel_filter == '420+ sec':
for vidid_tup in bf_cursor.fetchall():
if int(vidid_tup[1]) >= 420:
output_vidids_list.append(vidid_tup[0])
else:
dur_rng = [int(x) for x in sel_filter[:-4].split(' - ')]
for vidid_tup in bf_cursor.fetchall():
if dur_rng[0] <= int(vidid_tup[1]) <= dur_rng[1]:
output_vidids_list.append(vidid_tup[0])
elif filter_by_text == 'Year released':
if sel_filter == 'Not specified':
bf_cursor.execute('SELECT video_id FROM {} WHERE release_date = "" AND release_date_unknown = 0'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
elif sel_filter == 'Unknown':
bf_cursor.execute('SELECT video_id FROM {} WHERE release_date_unknown = 1'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
output_vidids_list.append(vidid_tup[0])
else:
bf_cursor.execute('SELECT video_id, release_date FROM {}'.format(bf_sel_subdb_internal))
for vidid_tup in bf_cursor.fetchall():
if sel_filter == vidid_tup[1][:4]:
output_vidids_list.append(vidid_tup[0])
bf_conn.close()
self.populate_table(output_vidids_list)
def populate_table(self, inp_vidids):
self.searchTable.setRowCount(0)
sub_db = common_vars.sub_db_lookup()[self.subDBDrop.currentText()]
pop_table_db_conn = sqlite3.connect(common_vars.video_db())
pop_table_db_cursor = pop_table_db_conn.cursor()
pop_table_settings_conn = sqlite3.connect(common_vars.settings_db())
pop_table_settings_cursor = pop_table_settings_conn.cursor()
pop_table_settings_cursor.execute('SELECT field_name_internal, displ_order FROM search_field_lookup WHERE '
'visible_in_search_view = 1')
field_lookup_dict = dict(
(x[0], x[1] + 2) if x[1] != 0 else (x[0], x[1]) for x in pop_table_settings_cursor.fetchall())
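# Note: display positions read from the settings table are shifted by 2 because table
# columns 1 and 2 are reserved below for the edit and watch icons; a displ_order of 0
# is left in column 0.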
watch_icon = QtGui.QIcon(getcwd() + '/icons/play-icon.png')
edit_icon = QtGui.QIcon(getcwd() + '/icons/edit-icon.png')
matching_vids = []
for vidid in inp_vidids:
pop_table_db_cursor.execute('SELECT primary_editor_username FROM {} WHERE video_id = ?'.format(sub_db),
(vidid,))
matching_vids.append(pop_table_db_cursor.fetchone())
matching_vid_check = [x for x in matching_vids if x is not None]
self.searchTable.setSortingEnabled(False)
if matching_vid_check != []: # If there is at least one result in the sub-db
for row in range(0, len(inp_vidids)):
self.searchTable.insertRow(row)
for field, col in field_lookup_dict.items():
query = 'SELECT {} FROM {} '.format(field, sub_db)
pop_table_db_cursor.execute(query + 'WHERE video_id = ?', (inp_vidids[row],))
temp_val = pop_table_db_cursor.fetchall()[0][0]
pop_table_db_cursor.execute('SELECT local_file FROM {} WHERE video_id = ?'.format(sub_db),
(inp_vidids[row],))
loc_file_check = pop_table_db_cursor.fetchall()[0][0]
if loc_file_check != '':
loc_file_pop = True
else:
loc_file_pop = False
# Populating play local video icon
if loc_file_pop:
watch_icon_item = QtWidgets.QTableWidgetItem()
watch_icon_item.setIcon(watch_icon)
watch_icon_to_insert = QtWidgets.QTableWidgetItem(watch_icon_item)
self.searchTable.setItem(row, 2, watch_icon_to_insert)
# Populating edit icon
edit_icon_item = QtWidgets.QTableWidgetItem()
edit_icon_item.setIcon(edit_icon)
edit_icon_to_insert = QtWidgets.QTableWidgetItem(edit_icon_item)
self.searchTable.setItem(row, 1, edit_icon_to_insert)
# Populating table with data from db file
if temp_val is None:
val_to_insert = QtWidgets.QTableWidgetItem('')
else:
if field == 'star_rating' or field == 'my_rating':
val_to_insert = QtWidgets.QTableWidgetItem()
val_to_insert.setTextAlignment(QtCore.Qt.AlignCenter)
val_to_insert.setData(QtCore.Qt.DisplayRole, temp_val)
elif field == 'video_length' or field == 'sequence':
val_to_insert = QtWidgets.QTableWidgetItem()
val_to_insert.setTextAlignment(QtCore.Qt.AlignCenter)
val_to_insert.setData(QtCore.Qt.DisplayRole, temp_val)
else:
val_to_insert = QtWidgets.QTableWidgetItem(str(temp_val))
self.searchTable.setItem(row, col, val_to_insert)
self.searchTable.setSortingEnabled(True)
self.searchTable.sortByColumn(field_lookup_dict['video_title'], QtCore.Qt.AscendingOrder)
self.searchTable.sortByColumn(field_lookup_dict['primary_editor_username'], QtCore.Qt.AscendingOrder)
pop_table_db_conn.close()
pop_table_settings_conn.close()
def update_col_width(self):
pass
def table_cell_clicked(self, row, col, vidid):
cell_clicked_db_conn = sqlite3.connect(common_vars.video_db())
cell_clicked_db_cursor = cell_clicked_db_conn.cursor()
subdb = common_vars.sub_db_lookup()[self.subDBDrop.currentText()]
if col == 2:
cell_clicked_db_cursor.execute('SELECT local_file FROM {} WHERE video_id = ?'.format(subdb), (vidid,))
file_path = cell_clicked_db_cursor.fetchone()[0].replace('\\', '/')
if file_path != '':
try:
startfile(file_path)
except Exception:
file_not_found_msg = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Information, 'File not found',
'Local file not found. Please check the file path in the\n'
'video\'s AMV Tracker profile.')
file_not_found_msg.exec_()
else:
no_file_msg = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Information, 'No local file specified',
'You have not specified a local file path for this video. Please\n'
'go to the video profile to add a local file path.')
no_file_msg.exec_()
|
{
"content_hash": "7209f2512291b08ff3d6f3e05acf7790",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 114,
"avg_line_length": 41.195729537366546,
"alnum_prop": 0.6861178299930891,
"repo_name": "bsobotka/amv_tracker",
"id": "21f2f3536348aacada2db88dbd327112256a4290",
"size": "23152",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "main_window/mainwindow - Copy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "584445"
}
],
"symlink_target": ""
}
|
"""Tests for the DeterminantalPointProcess distribution."""
import contextlib
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import bernoulli as bernoulli_lib
from tensorflow_probability.python.distributions import dpp as dpp_lib
from tensorflow_probability.python.distributions import uniform
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math.psd_kernels import exponentiated_quadratic as tfpk
@contextlib.contextmanager
def _capture_bernoulli_samples():
"""Use monkey-patching to capture the output of an Bernoulli sample."""
observations = []
true_sample = bernoulli_lib.Bernoulli.sample
def _capturing_sample(
self, sample_shape=(), seed=None, name='sample', **kwargs):
samples = true_sample(self, sample_shape, seed, name, **kwargs)
observations.append(samples)
return samples
bernoulli_lib.Bernoulli.sample = _capturing_sample
try:
yield observations
finally:
bernoulli_lib.Bernoulli.sample = true_sample
def kernel_over_unit_square(n_points, batch_shape=(), dtype=tf.float32):
kernel = tfpk.ExponentiatedQuadratic(amplitude=tf.ones([], dtype),
length_scale=0.1)
pts = uniform.Uniform(tf.zeros([2], dtype), tf.ones([2], dtype)).sample(
tuple(batch_shape) + (n_points,), seed=test_util.test_seed())
kernel_mat = kernel.matrix(pts, pts) + 1e-3 * tf.eye(n_points, dtype=dtype)
eigvals, eigvecs = tf.linalg.eigh(kernel_mat)
return kernel_mat, eigvals, eigvecs
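# Note: this helper samples n_points uniformly from the unit square, builds an
# exponentiated-quadratic kernel matrix over them (the 1e-3 * I jitter keeps the
# matrix comfortably positive definite for tf.linalg.eigh), and returns the matrix
# together with its eigendecomposition -- the (eigenvalues, eigenvectors) pair is
# the parameterization DeterminantalPointProcess takes throughout the tests below.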
@test_util.test_all_tf_execution_regimes
class _DppTest(test_util.TestCase):
fast_path_enabled = True
param_dtype = tf.float32
def setUp(self):
dpp_lib.FAST_PATH_ENABLED = self.fast_path_enabled
super().setUp()
@parameterized.parameters(
(
[10],
[5, 10, 10],
[5],
10,
),
(
[5, 10],
[10, 10],
[5],
10,
),
(
[10],
[10, 10],
[],
10,
),
(
[1, 10],
[10, 10],
[1],
10,
),
(
[5, 3, 4],
[4, 4],
[5, 3],
4,
),
)
def testShapes(self, eigvals_shape, eigvecs_shape, expected_batch_shape,
n_points):
eigvals = tf.ones(eigvals_shape, dtype=self.param_dtype)
eigvecs = tf.zeros(eigvecs_shape, dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
self.assertAllEqual(expected_batch_shape, dpp.batch_shape)
self.assertAllEqual(expected_batch_shape, dpp.batch_shape_tensor())
self.assertAllEqual([n_points], dpp.event_shape_tensor())
self.assertAllEqual([n_points], dpp.event_shape)
@parameterized.named_parameters(
('empty', (), 10, []),
('size5', (), 10, [0, 1, 4, 7, 9]),
('full', (), 10, list(range(10))),
('noindices', (), 10, None),
('batch_5', (5,), 3, [0, 2]),
('batch_6x3', (6, 3), 10, [0, 1, 5, 6]),
('batch_1x2x3', (1, 2, 3), 4, [1]),
('batch_noindices', (1, 2, 3), 4, None),
)
def testReconstructMatrix(self, batch_shape, n_points, indices):
matrices, eigvals, eigvecs = self.evaluate(kernel_over_unit_square(
n_points, batch_shape=batch_shape, dtype=self.param_dtype))
if indices is not None:
indices = np.array(indices, dtype=np.int32)
one_hot_indices = tf.constant(
[(1 if i in indices else 0) for i in range(n_points)], dtype=tf.int32)
expected = matrices[..., indices[:, np.newaxis], indices]
else:
one_hot_indices = None
expected = matrices
reconstructed = dpp_lib._reconstruct_matrix(
tf.constant(eigvals), tf.constant(eigvecs), one_hot_indices)
if indices is None:
# When no indices are specified, we can also check the full matrix.
self.assertAllClose(expected, reconstructed, rtol=1e-5)
# logdet must always agree, even if shape does not.
self.assertAllClose(np.linalg.slogdet(expected)[1],
tf.linalg.logdet(reconstructed),
rtol=1e-5, atol=1e-5)
@parameterized.parameters(
([5], [5, 5], 10, [10, 5]),
([2, 3, 5], [5, 5], 10, [10, 2, 3, 5]),
([5], [2, 3, 5, 5], 10, [10, 2, 3, 5]),
)
def testSampleElementaryDppShape(self, eigvals_shape, eigvecs_shape,
n_samples, expected):
eigvals = tf.ones(eigvals_shape, dtype=self.param_dtype)
eigvecs = tf.eye(eigvecs_shape[-1], batch_shape=eigvecs_shape[:-2],
dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
with _capture_bernoulli_samples() as sampled_edpp_indices:
dpp.sample(n_samples, seed=test_util.test_seed())
self.assertLen(sampled_edpp_indices, 1)
self.assertAllEqual(sampled_edpp_indices[0].shape, expected)
def testSampleElementaryDppPoints(self):
"""Checks we don't sample points with corresponding eigenvalue = 0."""
n_points, batch_size = 5, 10
_, eigvals, eigvecs = kernel_over_unit_square(n_points,
dtype=self.param_dtype)
eigvals = tf.one_hot(1, n_points, dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
with _capture_bernoulli_samples() as sampled_edpp_indices:
dpp.sample(batch_size, seed=test_util.test_seed())
self.assertLen(sampled_edpp_indices, 1)
sample = sampled_edpp_indices[0]
self.assertAllEqual(sample[:, 0], tf.zeros([batch_size]))
self.assertAllEqual(sample[:, 1] * (1 - sample[:, 1]),
tf.zeros([batch_size]))
self.assertAllEqual(sample[:, 2:], tf.zeros([batch_size, n_points - 2]))
def testSampleFromEDppSize(self):
"""Tests that the selected E-DPP size is equal to the sampled size."""
n_points = 5
edpp_indices = tf.constant(
[
[0] * n_points, # Empty set.
[0, 1, 0, 0, 1],
[0, 0, 0, 1, 1],
[1] * n_points
],
dtype=tf.int32)
eigenvectors = tf.random.uniform(
[edpp_indices.shape[0], n_points, n_points], seed=test_util.test_seed(),
dtype=self.param_dtype)
samples = self.evaluate(
dpp_lib._sample_from_edpp(eigenvectors, edpp_indices,
seed=test_util.test_seed()))
actual_sizes = np.sum(samples, axis=-1)
expected_sizes = tf.reduce_sum(edpp_indices, axis=-1)
self.assertAllEqual(actual_sizes, expected_sizes)
def testSampleFromEDppDeterministic(self):
"""Tests that for diagonal kernels, we select points w/ eigenvalues > 0."""
edpp_indices = tf.constant([0, 0, 0, 1, 1, 1, 0, 1, 0, 0], dtype=tf.int32)
eigvecs = tf.eye(10, dtype=self.param_dtype)
samples = dpp_lib._sample_from_edpp(
eigvecs, edpp_indices, seed=test_util.test_seed())
self.assertAllEqual(edpp_indices, samples)
def testOrthogonalComplementEi(self):
"""Checks that row i=0 after orthogonalization."""
dim, n_vectors, i = 10, 5, 3
vectors = tf.random.normal([dim, n_vectors], seed=test_util.test_seed(),
dtype=self.param_dtype)
ortho = self.evaluate(dpp_lib._orthogonal_complement_e_i(vectors, i,
n_vectors))
self.assertAllEqual(ortho.shape, [dim, n_vectors])
self.assertAllClose(ortho[:, -1], np.zeros(dim))
self.assertAllClose(ortho[i, :], np.zeros(n_vectors))
def testDppLEnsembleMatrix(self):
n_points = 20
true_kernel, eigvals, eigvecs = self.evaluate(
kernel_over_unit_square(n_points, dtype=self.param_dtype))
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
self.assertAllClose(
true_kernel,
dpp.l_ensemble_matrix(),
rtol=1e-5, atol=1e-5)
def testDppMarginalKernel(self):
n_points = 20
true_kernel, eigvals, eigvecs = kernel_over_unit_square(
n_points, dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
marginal_kernel = tf.matmul(true_kernel,
tf.linalg.inv(true_kernel + np.eye(n_points)))
self.assertAllClose(
marginal_kernel - dpp.marginal_kernel(),
tf.zeros([n_points, n_points]),
atol=1e-5)
@parameterized.named_parameters(dict(testcase_name='_3', n_points=3),
dict(testcase_name='_4', n_points=4),
dict(testcase_name='_5', n_points=5))
def testDppLogPDF(self, n_points):
true_kernel, eigvals, eigvecs = self.evaluate(
kernel_over_unit_square(n_points, dtype=self.param_dtype))
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
log_probs = []
for i in range(2**n_points): # n_points is small so we can enumerate sets.
binary = bin(i)[2:]
subset = [0] * n_points
subset[-len(binary):] = [int(c) for c in binary]
mask = np.array(subset, np.bool_)
submatrix = true_kernel[mask][:, mask]
expected = (tf.linalg.logdet(submatrix) -
tf.linalg.logdet(true_kernel +
tf.eye(n_points, dtype=self.param_dtype)))
log_probs.append(dpp.log_prob(tf.constant(subset)))
self.assertAllClose(expected, log_probs[-1],
atol=1e-4,
msg=str(subset))
self.assertAllClose(1., tf.reduce_sum(tf.math.exp(log_probs)))
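# Note: the loop above verifies the L-ensemble identity the distribution implements,
# log P(S) = logdet(L_S) - logdet(L + I), where L_S is the kernel submatrix indexed
# by the subset S; summing exp(log_prob) over all 2**n_points subsets must therefore
# equal 1, which is the final assertion.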
def testDppSample(self):
n_points = 50
_, eigvals, eigvecs = kernel_over_unit_square(n_points,
dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
n = 10
samples = dpp.sample(n, seed=test_util.test_seed())
self.assertEqual(samples.shape, (n, n_points))
def testDppSampleStats(self):
n_points = 5
_, eigvals, eigvecs = kernel_over_unit_square(n_points,
dtype=self.param_dtype)
dpp = dpp_lib.DeterminantalPointProcess(eigvals, eigvecs)
n = 500
samples, expected_marginals = self.evaluate(
[dpp.sample(n, seed=test_util.test_seed()),
tf.linalg.diag_part(dpp.marginal_kernel())])
counts = np.zeros(n_points)
for s in samples:
counts[np.nonzero(s)[0]] += 1.
self.assertAllClose(counts / n, expected_marginals, atol=.1)
def testEigvalsAsserts(self):
with self.assertRaisesOpError(r'must be positive'):
dpp = dpp_lib.DeterminantalPointProcess(
tf.constant([1, 2, 3, 0.], dtype=self.param_dtype),
tf.eye(4, dtype=self.param_dtype),
validate_args=True)
self.evaluate(dpp.sample(seed=test_util.test_seed()))
v = tf.Variable(tf.constant([1, 2, -3, 4.], dtype=self.param_dtype))
self.evaluate(v.initializer)
dpp = dpp_lib.DeterminantalPointProcess(
v, tf.eye(4, dtype=self.param_dtype), validate_args=True)
with self.assertRaisesOpError(r'must be positive'):
self.evaluate(dpp.sample(seed=test_util.test_seed()))
def testEigvecsAsserts(self):
with self.assertRaisesOpError(r'must be orthonormal'):
dpp = dpp_lib.DeterminantalPointProcess(
tf.ones([4], dtype=self.param_dtype),
tf.ones([4, 4], dtype=self.param_dtype) / 2,
validate_args=True)
self.evaluate(dpp.sample(seed=test_util.test_seed()))
v = tf.Variable(tf.ones([4, 4], dtype=self.param_dtype) / 2)
self.evaluate(v.initializer)
dpp = dpp_lib.DeterminantalPointProcess(
tf.ones([4], dtype=self.param_dtype), v, validate_args=True)
with self.assertRaisesOpError(r'must be orthonormal'):
self.evaluate(dpp.sample(seed=test_util.test_seed()))
with self.assertRaisesOpError(r'must be orthonormal'):
dpp = dpp.copy(eigenvectors=tf.eye(4, dtype=self.param_dtype) * .1)
self.evaluate(dpp.sample(seed=test_util.test_seed()))
self.evaluate(v.assign(tf.eye(4, dtype=self.param_dtype) * .1))
dpp = dpp.copy(eigenvectors=v)
with self.assertRaisesOpError(r'must be orthonormal'):
self.evaluate(dpp.sample(seed=test_util.test_seed()))
def testXLASample(self):
self.skip_if_no_xla()
_, eigvals, eigvecs = kernel_over_unit_square(20, dtype=self.param_dtype)
for n in 1, 5:
@tf.function(jit_compile=True)
def f(eigvals):
return dpp_lib.DeterminantalPointProcess(eigvals, eigvecs).sample(
n, seed=test_util.test_seed()) # pylint: disable=cell-var-from-loop
self.evaluate(f(eigvals))
class DppTestFast32(_DppTest):
param_dtype = tf.float32
fast_path_enabled = True
class DppTestFast64(_DppTest):
param_dtype = tf.float64
fast_path_enabled = True
class DppTest32(_DppTest):
param_dtype = tf.float32
fast_path_enabled = False
class DppTest64(_DppTest):
param_dtype = tf.float64
fast_path_enabled = False
del _DppTest
if __name__ == '__main__':
test_util.main()
|
{
"content_hash": "05a17873ffb4726ac1c84ad56738ed98",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 90,
"avg_line_length": 37.13068181818182,
"alnum_prop": 0.619433817903596,
"repo_name": "tensorflow/probability",
"id": "15ef69e06096839de8a95f3896e8875e9278575a",
"size": "13748",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/distributions/dpp_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
}
|
"""Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <issam.laradji@gmail.com>
# Andreas Mueller
# Jiyuan Qian
# Licence: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy.optimize import fmin_l_bfgs_b
import warnings
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ._base import logistic, softmax
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..externals import six
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import check_array, check_X_y, column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import _check_partial_fit_first_call
_STOCHASTIC_ALGOS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, algorithm,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, random_state, tol, verbose,
warm_start, momentum, nesterovs_momentum, early_stopping,
validation_fraction, beta_1, beta_2, epsilon):
self.activation = activation
self.algorithm = algorithm
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations, with_output_activation=True):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
with_output_activation : bool, default True
If True, the output passes through the output activation
function, which is either the softmax function or the
logistic function
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
activations[i + 1] = hidden_activation(activations[i + 1])
# For the last layer
if with_output_activation:
output_activation = ACTIVATIONS[self.out_activation_]
activations[i + 1] = output_activation(activations[i + 1])
return activations
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
return coef_grads, intercept_grads
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in l-bfgs
Parameters
----------
packed_coef_inter : array-like
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads)
self.n_iter_ += 1
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss = LOSS_FUNCTIONS[self.loss](y, activations[-1])
# Add L2 regularization term to loss
values = np.sum(
np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
loss += (0.5 * self.alpha) * values / n_samples
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] here works with following
# combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
diff = y - activations[-1]
deltas[last] = -diff
# Compute gradient for the last layer
coef_grads, intercept_grads = self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads)
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
derivative = DERIVATIVES[self.activation]
deltas[i - 1] *= derivative(activations[i])
coef_grads, intercept_grads = self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads,
intercept_grads)
return loss, coef_grads, intercept_grads
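# A sketch of the recurrences the backward pass above implements (deltas are the
# gradients of the loss with respect to the pre-activation values of each layer):
#     delta_last    = activations[-1] - y        (holds for the matched output
#                                                 activation / loss pairs noted above)
#     delta_{i - 1} = (delta_i @ W_i.T) * act'(activations[i])
#     dL/dW_i       = (activations[i].T @ delta_i + alpha * W_i) / n_samples
#     dL/db_i       = mean(delta_i, axis=0)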
def _initialize(self, y, layer_units):
# set all attributes, allocate weights etc for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not isinstance(self, ClassifierMixin):
self.out_activation_ = 'identity'
# Output for multi class
elif self.label_binarizer_.y_type_ == 'multiclass':
self.out_activation_ = 'softmax'
# Output for binary class and multi-label
else:
self.out_activation_ = 'logistic'
if self.loss == 'log_loss':
self.loss = 'binary_log_loss'
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
rng = check_random_state(self.random_state)
coef_init, intercept_init = self._init_coef(layer_units[i],
layer_units[i + 1],
rng)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if self.algorithm in _STOCHASTIC_ALGOS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
else:
self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out, rng):
if self.activation == 'logistic':
# Use the initialization method recommended by
# Glorot et al.
init_bound = np.sqrt(2. / (fan_in + fan_out))
elif self.activation == 'tanh':
init_bound = np.sqrt(6. / (fan_in + fan_out))
elif self.activation == 'relu':
init_bound = np.sqrt(6. / (fan_in + fan_out))
else:
# this was caught earlier, just to make sure
raise ValueError("Unknown activation function %s" %
self.activation)
coef_init = rng.uniform(-init_bound, init_bound, (fan_in, fan_out))
intercept_init = rng.uniform(-init_bound, init_bound, fan_out)
return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
self._validate_hyperparameters()
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError("hidden_layer_sizes must be > 0, got %s." %
hidden_layer_sizes)
X, y = self._validate_input(X, y, incremental)
n_samples, n_features = X.shape
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = ([n_features] + hidden_layer_sizes +
[self.n_outputs_])
if not hasattr(self, 'coefs_') or (not self.warm_start and not
incremental):
# First time training the model
self._initialize(y, layer_units)
# l-bfgs does not support mini-batches
if self.algorithm == 'l-bfgs':
batch_size = n_samples
elif self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
if self.batch_size < 1 or self.batch_size > n_samples:
warnings.warn("Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped")
batch_size = np.clip(self.batch_size, 1, n_samples)
# Initialize lists
activations = [X]
activations.extend(np.empty((batch_size, n_fan_out))
for n_fan_out in layer_units[1:])
deltas = [np.empty_like(a_layer) for a_layer in activations]
coef_grads = [np.empty((n_fan_in_, n_fan_out_)) for n_fan_in_,
n_fan_out_ in zip(layer_units[:-1],
layer_units[1:])]
intercept_grads = [np.empty(n_fan_out_) for n_fan_out_ in
layer_units[1:]]
# Run the Stochastic optimization algorithm
if self.algorithm in _STOCHASTIC_ALGOS:
self._fit_stochastic(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental)
# Run the LBFGS algorithm
elif self.algorithm == 'l-bfgs':
self._fit_lbfgs(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units)
return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
self.learning_rate_init)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" %
self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError("nesterovs_momentum must be either True or False,"
" got %s." % self.nesterovs_momentum)
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False,"
" got %s." % self.early_stopping)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError("validation_fraction must be >= 0 and < 1, "
"got %s" % self.validation_fraction)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
# raise ValueError if not registered
supported_activations = ['logistic', 'tanh', 'relu']
if self.activation not in supported_activations:
raise ValueError("The activation '%s' is not supported. Supported "
"activations are %s." % (self.activation,
supported_activations))
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
if self.algorithm not in _STOCHASTIC_ALGOS + ["l-bfgs"]:
raise ValueError("The algorithm %s is not supported. " %
self.algorithm)
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_,
self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
optimal_parameters, self.loss_, d = fmin_l_bfgs_b(
x0=packed_coef_inter,
func=self._loss_grad_lbfgs,
maxfun=self.max_iter,
iprint=iprint,
pgtol=self.tol,
args=(X, y, activations, deltas, coef_grads, intercept_grads))
self._unpack(optimal_parameters)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental):
rng = check_random_state(self.random_state)
if not incremental or not hasattr(self, '_optimizer'):
params = self.coefs_ + self.intercepts_
if self.algorithm == 'sgd':
self._optimizer = SGDOptimizer(
params, self.learning_rate_init, self.learning_rate,
self.momentum, self.nesterovs_momentum, self.power_t)
elif self.algorithm == 'adam':
self._optimizer = AdamOptimizer(
params, self.learning_rate_init, self.beta_1, self.beta_2,
self.epsilon)
# early_stopping in partial_fit doesn't make sense
early_stopping = self.early_stopping and not incremental
if early_stopping:
X, X_val, y, y_val = train_test_split(
X, y, random_state=self.random_state,
test_size=self.validation_fraction)
if isinstance(self, ClassifierMixin):
y_val = self.label_binarizer_.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
if self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
for it in range(self.max_iter):
X, y = shuffle(X, y, random_state=rng)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
activations[0] = X[batch_slice]
batch_loss, coef_grads, intercept_grads = self._backprop(
X[batch_slice], y[batch_slice], activations, deltas,
coef_grads, intercept_grads)
accumulated_loss += batch_loss * (batch_slice.stop -
batch_slice.start)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_,
self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(early_stopping, X_val, y_val)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > 2:
# not better than last two iterations by tol.
# stop or decrease learning rate
if early_stopping:
msg = ("Validation score did not improve more than "
"tol=%f for two consecutive epochs." % self.tol)
else:
msg = ("Training loss did not improve more than tol=%f"
" for two consecutive epochs." % self.tol)
is_stopping = self._optimizer.trigger_stopping(
msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn('Stochastic Optimizer: Maximum iterations'
' reached and the optimization hasn\'t '
'converged yet.'
, ConvergenceWarning)
except KeyboardInterrupt:
pass
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
# compute validation score, use that for stopping
self.validation_scores_.append(self.score(X_val, y_val))
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ +
self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy()
for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
if self.algorithm not in _STOCHASTIC_ALGOS:
raise AttributeError("partial_fit is only available for stochastic"
"optimization algorithms. %s is not"
" stochastic" % self.algorithm)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
return self._fit(X, y, incremental=True)
def _decision_scores(self, X):
"""Predict using the trained model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
layer_units = [X.shape[1]] + hidden_layer_sizes + \
[self.n_outputs_]
# Initialize layers
activations = [X]
for i in range(self.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
# forward propagate
self._forward_pass(activations, with_output_activation=False)
y_pred = activations[-1]
return y_pred
class MLPClassifier(BaseMultilayerPerceptron, ClassifierMixin):
"""Multi-layer Perceptron classifier.
This algorithm optimizes the log-loss function using l-bfgs or gradient
descent.
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
algorithm : {'l-bfgs', 'sgd', 'adam'}, default 'adam'
The algorithm for weight optimization.
- 'l-bfgs' is an optimization algorithm in the family of
quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimization algorithm
proposed by Kingma, Diederik, and Jimmy Ba
Note: The default algorithm 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'l-bfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the algorithm is 'l-bfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
-'constant', is a constant learning rate given by
'learning_rate_init'.
-'invscaling' gradually decreases the learning rate ``learning_rate_`` at
each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
-'adaptive', keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when algorithm='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The algorithm iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
algorithm='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when algorithm='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when algorithm='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when algorithm='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when algorithm='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when algorithm='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when algorithm='adam'
Attributes
----------
`classes_` : array or list of array of shape (n_classes,)
Class labels for each output.
`loss_` : float
The current loss computed with the loss function.
`label_binarizer_` : LabelBinarizer
A LabelBinarizer object trained on the training set.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
The number of iterations the algorithm has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
algorithm='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPClassifier, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, algorithm=algorithm, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='log_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
self.label_binarizer_ = LabelBinarizer()
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
self.label_binarizer_.fit(y)
if not hasattr(self, 'classes_') or not incremental:
self.classes_ = self.label_binarizer_.classes_
else:
classes = self.label_binarizer_.classes_
if not np.all(np.in1d(classes, self.classes_)):
raise ValueError("`y` has classes not in `self.classes_`."
" `self.classes_` has %s. 'y' has %s." %
(self.classes_, classes))
y = self.label_binarizer_.transform(y)
return X, y
def decision_function(self, X):
"""Decision function of the mlp model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The values of decision function for each class in the model.
"""
check_is_fitted(self, "coefs_")
y_scores = self._decision_scores(X)
if self.n_outputs_ == 1:
return y_scores.ravel()
else:
return y_scores
def predict(self, X):
"""Predict using the multi-layer perceptron classifier
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self, "coefs_")
y_scores = self.decision_function(X)
y_scores = ACTIVATIONS[self.out_activation_](y_scores)
return self.label_binarizer_.inverse_transform(y_scores)
@property
def partial_fit(self):
"""Fit the model to data matrix X and target y.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
y : array-like, shape (n_samples,)
The target values.
classes : array, shape (n_classes)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns a trained MLP model.
"""
if self.algorithm not in _STOCHASTIC_ALGOS:
raise AttributeError("partial_fit is only available for stochastic"
"optimization algorithms. %s is not"
" stochastic" % self.algorithm)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
_check_partial_fit_first_call(self, classes)
super(MLPClassifier, self)._partial_fit(X, y)
return self
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : array-like, shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to log(predict_proba(X))
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : array-like, shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
y_scores = self.decision_function(X)
if y_scores.ndim == 1:
y_scores = logistic(y_scores)
return np.vstack([1 - y_scores, y_scores]).T
else:
return softmax(y_scores)
class MLPRegressor(BaseMultilayerPerceptron, RegressorMixin):
"""Multi-layer Perceptron regressor.
This algorithm optimizes the squared-loss using l-bfgs or gradient descent.
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default (100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'logistic', 'tanh', 'relu'}, default 'relu'
Activation function for the hidden layer.
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
algorithm : {'l-bfgs', 'sgd', 'adam'}, default 'adam'
The algorithm for weight optimization.
- 'l-bfgs' is an optimization algorithm in the family of
quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimization algorithm
proposed by Kingma, Diederik, and Jimmy Ba
Note: The default algorithm 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'l-bfgs' can converge faster and perform
better.
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
batch_size : int, optional, default 'auto'
Size of minibatches for stochastic optimizers.
If the algorithm is 'l-bfgs', the regressor will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default 'constant'
Learning rate schedule for weight updates.
-'constant', is a constant learning rate given by
'learning_rate_init'.
-'invscaling' gradually decreases the learning rate ``learning_rate_`` at
each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
-'adaptive', keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when algorithm='sgd'.
max_iter : int, optional, default 200
Maximum number of iterations. The algorithm iterates until convergence
(determined by 'tol') or this number of iterations.
random_state : int or RandomState, optional, default None
State or seed for random number generator.
shuffle : bool, optional, default True
Whether to shuffle samples in each iteration. Only used when
algorithm='sgd' or 'adam'.
tol : float, optional, default 1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least tol for two consecutive iterations, unless `learning_rate`
is set to 'adaptive', convergence is considered to be reached and
training stops.
learning_rate_init : double, optional, default 0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when algorithm='sgd' or 'adam'.
power_t : double, optional, default 0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when algorithm='sgd'.
verbose : bool, optional, default False
Whether to print progress messages to stdout.
warm_start : bool, optional, default False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution.
momentum : float, default 0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when algorithm='sgd'.
nesterovs_momentum : boolean, default True
Whether to use Nesterov's momentum. Only used when algorithm='sgd' and
momentum > 0.
early_stopping : bool, default False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for two consecutive
epochs.
Only effective when algorithm='sgd' or 'adam'
validation_fraction : float, optional, default 0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, optional, default 0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
beta_2 : float, optional, default 0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when algorithm='adam'
epsilon : float, optional, default 1e-8
Value for numerical stability in adam. Only used when algorithm='adam'
Attributes
----------
`loss_` : float
The current loss computed with the loss function.
`coefs_` : list, length n_layers - 1
The ith element in the list represents the weight matrix corresponding
to layer i.
`intercepts_` : list, length n_layers - 1
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int,
The number of iterations the algorithm has run.
n_layers_ : int
Number of layers.
`n_outputs_` : int
Number of outputs.
`out_activation_` : string
Name of the output activation function.
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense and sparse numpy
arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
algorithm='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True,
random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8):
sup = super(MLPRegressor, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, algorithm=algorithm, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y : array-like, shape (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self, "coefs_")
y_pred = self._decision_scores(X)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _validate_input(self, X, y, incremental):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True, y_numeric=True)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
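# Usage sketch (illustrative only, not part of the original module): a minimal,
# hedged example of fitting the regressor described in the docstring above. The
# synthetic data and hyperparameter values are assumptions for illustration only.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 5)
    y_demo = X_demo.sum(axis=1) + 0.1 * rng.randn(200)
    reg = MLPRegressor(hidden_layer_sizes=(50,), algorithm='adam', max_iter=300)
    reg.fit(X_demo, y_demo)
    print(reg.predict(X_demo[:3]))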
|
{
"content_hash": "c986fec4f2efcaddf350e30495383a6e",
"timestamp": "",
"source": "github",
"line_count": 1268,
"max_line_length": 81,
"avg_line_length": 39.61514195583596,
"alnum_prop": 0.5877528268832617,
"repo_name": "DSLituiev/scikit-learn",
"id": "f0d6268d9d60116fd1eb5fa40cd29066176e90ec",
"size": "50232",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "sklearn/neural_network/multilayer_perceptron.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6431918"
},
{
"name": "Shell",
"bytes": "9256"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../../")
import h2o
def benign(ip,port):
# Connect to h2o
h2o.init(ip,port)
training_data = h2o.import_frame(h2o.locate("smalldata/logreg/benign.csv"))
Y = 3
X = range(3) + range(4,11)
#Log.info("Build the model")
model = h2o.glm(y=training_data[Y].asfactor(), x=training_data[X], family="binomial", alpha=[0], Lambda=[1e-5])
#Log.info("Check that the columns used in the model are the ones we passed in.")
#Log.info("===================Columns passed in: ================")
in_names = [training_data.names()[i] for i in X]
#Log.info("===================Columns passed out: ================")
out_names = [model._model_json['output']['coefficients_table'].cell_values[c][0] for c in range(len(X)+1)]
assert in_names == out_names[1:]
if __name__ == "__main__":
h2o.run_test(sys.argv, benign)
|
{
"content_hash": "52dc52cd0ac1092e14ed0f6548f90ee2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 115,
"avg_line_length": 34.23076923076923,
"alnum_prop": 0.5719101123595506,
"repo_name": "ChristosChristofidis/h2o-3",
"id": "9733392a9c65bd42384fe5ca7a39ec2f5f5c1dac",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/glm/pyunit_benignGLM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "261942"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "105430"
},
{
"name": "Java",
"bytes": "5223547"
},
{
"name": "JavaScript",
"bytes": "88331"
},
{
"name": "Makefile",
"bytes": "31513"
},
{
"name": "Python",
"bytes": "1900641"
},
{
"name": "R",
"bytes": "1611030"
},
{
"name": "Rebol",
"bytes": "23302"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "44528"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import sys
from abc import abstractmethod
from contextlib import contextmanager
from hashlib import sha1
from twitter.common.collections.orderedset import OrderedSet
from pants.base.build_invalidator import BuildInvalidator, CacheKeyGenerator
from pants.base.cache_manager import InvalidationCacheManager, InvalidationCheck
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import TaskIdentityFingerprintStrategy
from pants.base.worker_pool import Work
from pants.cache.artifact_cache import UnreadableArtifact, call_insert, call_use_cached_files
from pants.cache.cache_setup import CacheSetup
from pants.option.optionable import Optionable
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import ScopeInfo
from pants.reporting.reporting_utils import items_to_report_element
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin
from pants.util.meta import AbstractClass
class TaskBase(SubsystemClientMixin, Optionable, AbstractClass):
"""Defines a lifecycle that prepares a task for execution and provides the base machinery
needed to execute it.
Provides the base lifecycle methods that allow a task to interact with the command line, other
tasks and the user. The lifecycle is linear and run via the following sequence:
1. register_options - declare options configurable via cmd-line flag or config file.
2. product_types - declare the product types your task is capable of producing.
3. alternate_target_roots - propose a different set of target roots to use than those specified
via the CLI for the active pants run.
4. prepare - request any products needed from other tasks.
5. __init__ - distill configuration into the information needed to execute.
Provides access to the current run context for scoping work.
Also provides the basic facilities for doing work efficiently including providing a work directory
  for scratch space on disk, an invalidator for checking which targets need work done on them, and an
artifact cache for re-using previously cached work.
#TODO(John Sirois): Lifecycle is currently split between TaskBase and Task and lifecycle
(interface) and helpers (utility) are currently conflated. Tease these apart and narrow the scope
of the helpers. Ideally console tasks don't inherit a workdir, invalidator or build cache for
example.
"""
options_scope_category = ScopeInfo.TASK
# Tests may override this to provide a stable name despite the class name being a unique,
# synthetic name.
_stable_name = None
@classmethod
def stable_name(cls):
"""The stable name of this task type.
We synthesize subclasses of the task types at runtime, and these synthesized subclasses
may have random names (e.g., in tests), so this gives us a stable name to use across runs,
e.g., in artifact cache references.
"""
return cls._stable_name or cls._compute_stable_name()
@classmethod
def _compute_stable_name(cls):
return '{}_{}'.format(cls.__module__, cls.__name__).replace('.', '_')
@classmethod
def global_subsystems(cls):
"""The global subsystems this task uses.
A tuple of subsystem types.
"""
return tuple()
@classmethod
def task_subsystems(cls):
"""The private, per-task subsystems this task uses.
A tuple of subsystem types.
"""
return (CacheSetup,)
@classmethod
def product_types(cls):
"""The list of products this Task produces. Set the product type(s) for this
    task, i.e. the product type(s) this task creates, e.g. ['classes'].
    By default, each task is considered to create its own unique product type(s).
    Subclasses that create products should override this to specify their unique product type(s).
"""
return []
@classmethod
def known_scope_infos(cls):
"""Yields ScopeInfo for all known scopes for this task, in no particular order."""
# The task's own scope.
yield cls.get_scope_info()
# The scopes of any task-specific subsystems it uses.
for dep in cls.subsystem_dependencies_iter():
if not dep.is_global():
yield dep.subsystem_cls.get_scope_info(subscope=dep.scope)
@classmethod
def supports_passthru_args(cls):
"""Subclasses may override to indicate that they can use passthru args."""
return False
@classmethod
def _scoped_options(cls, options):
return options[cls.options_scope]
@classmethod
def _alternate_target_roots(cls, options, address_mapper, build_graph):
# Subclasses should not generally need to override this method.
# TODO(John Sirois): Kill when killing GroupTask as part of RoundEngine parallelization.
return cls.alternate_target_roots(cls._scoped_options(options), address_mapper, build_graph)
@classmethod
def alternate_target_roots(cls, options, address_mapper, build_graph):
"""Allows a Task to propose alternate target roots from those specified on the CLI.
At most 1 unique proposal is allowed amongst all tasks involved in the run. If more than 1
unique list of target roots is proposed an error is raised during task scheduling.
    :returns list: The new target roots to use or None to accept the CLI-specified target roots.
"""
@classmethod
def _prepare(cls, options, round_manager):
# Subclasses should not generally need to override this method.
# TODO(John Sirois): Kill when killing GroupTask as part of RoundEngine parallelization.
return cls.prepare(cls._scoped_options(options), round_manager)
@classmethod
def prepare(cls, options, round_manager):
"""Prepares a task for execution.
Called before execution and prior to any tasks that may be (indirectly) depended upon.
Typically a task that requires products from other goals would register interest in those
products here and then retrieve the requested product mappings when executed.
"""
def __init__(self, context, workdir):
"""Subclass __init__ methods, if defined, *must* follow this idiom:
class MyTask(Task):
def __init__(self, *args, **kwargs):
super(MyTask, self).__init__(*args, **kwargs)
...
This allows us to change Task.__init__()'s arguments without
changing every subclass. If the subclass does not need its own
initialization, this method can (and should) be omitted entirely.
"""
super(TaskBase, self).__init__()
self.context = context
self._workdir = workdir
# TODO: It would be nice to use self.get_options().cache_key_gen_version here, because then
# we could have a separate value for each scope if we really wanted to. However we can't
# access per-task options in Task.__init__ because GroupTask.__init__ calls it with the
# group task's scope, which isn't currently in the known scopes we generate options for.
self._cache_key_generator = CacheKeyGenerator(
self.context.options.for_global_scope().cache_key_gen_version)
self._cache_key_errors = set()
self._build_invalidator_dir = os.path.join(
self.context.options.for_global_scope().pants_workdir,
'build_invalidator',
self.stable_name())
self._cache_factory = CacheSetup.create_cache_factory_for_task(self)
self._options_fingerprinter = OptionsFingerprinter(self.context.build_graph)
self._fingerprint = None
def get_options(self):
"""Returns the option values for this task's scope."""
return self.context.options.for_scope(self.options_scope)
def get_passthru_args(self):
if not self.supports_passthru_args():
      raise TaskError('{0} does not support passthru args.'.format(self.stable_name()))
else:
return self.context.options.passthru_args_for_scope(self.options_scope)
@property
def workdir(self):
"""A scratch-space for this task that will be deleted by `clean-all`.
It's not guaranteed that the workdir exists, just that no other task has been given this
workdir path to use.
"""
return self._workdir
def _options_fingerprint(self, scope):
pairs = self.context.options.get_fingerprintable_for_scope(scope)
hasher = sha1()
for (option_type, option_val) in pairs:
fp = self._options_fingerprinter.fingerprint(option_type, option_val)
if fp is not None:
hasher.update(fp)
return hasher.hexdigest()
@property
def fingerprint(self):
"""Returns a fingerprint for the identity of the task.
A task fingerprint is composed of the options the task is currently running under.
Useful for invalidating unchanging targets being executed beneath changing task
options that affect outputted artifacts.
    A task's fingerprint is only valid after the task has been fully initialized.
"""
if not self._fingerprint:
hasher = sha1()
hasher.update(self._options_fingerprint(self.options_scope))
for dep in self.subsystem_dependencies_iter():
hasher.update(self._options_fingerprint(dep.options_scope()))
self._fingerprint = str(hasher.hexdigest())
return self._fingerprint
def artifact_cache_reads_enabled(self):
return self._cache_factory.read_cache_available()
def artifact_cache_writes_enabled(self):
return self._cache_factory.write_cache_available()
def invalidate_for_files(self):
"""Provides extra files that participate in invalidation.
Subclasses can override and return a list of full paths to extra, non-source files that should
be checked for changes when managing target invalidation. This is useful for tracking
changes to pre-built build tools, e.g., the thrift compiler.
"""
return []
def invalidate(self):
"""Invalidates all targets for this task."""
BuildInvalidator(self._build_invalidator_dir).force_invalidate_all()
def create_cache_manager(self, invalidate_dependents, fingerprint_strategy=None):
"""Creates a cache manager that can be used to invalidate targets on behalf of this task.
Use this if you need to check for invalid targets but can't use the contextmanager created by
invalidated(), e.g., because you don't want to mark the targets as valid when done.
invalidate_dependents: If True then any targets depending on changed targets are invalidated.
fingerprint_strategy: A FingerprintStrategy instance, which can do per task, finer grained
fingerprinting of a given Target.
"""
return InvalidationCacheManager(self._cache_key_generator,
self._build_invalidator_dir,
invalidate_dependents,
fingerprint_strategy=fingerprint_strategy,
invalidation_report=self.context.invalidation_report,
task_name=type(self).__name__)
@property
def cache_target_dirs(self):
"""Whether to cache files in VersionedTarget's results_dir after exiting an invalidated block.
Subclasses may override this method to return True if they wish to use this style
of "automated" caching, where each VersionedTarget is given an associated results directory,
which will automatically be uploaded to the cache. Tasks should place the output files
for each VersionedTarget in said results directory. It is highly suggested to follow this
schema for caching, rather than manually making updates to the artifact cache.
"""
return False
@contextmanager
def invalidated(self,
targets,
invalidate_dependents=False,
partition_size_hint=sys.maxint,
silent=False,
locally_changed_targets=None,
fingerprint_strategy=None,
topological_order=False):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are invalidated.
:param partition_size_hint: Each VersionedTargetSet in the yielded list will represent targets
containing roughly this number of source files, if possible. Set to
sys.maxint for a single VersionedTargetSet. Set to 0 for one
VersionedTargetSet per target. It is up to the caller to do the right
thing with whatever partitioning it asks for.
:param locally_changed_targets: Targets that we've edited locally. If specified, and there aren't too
many of them, we keep these in separate partitions from other targets,
as these are more likely to have build errors, and so to be rebuilt over
and over, and partitioning them separately is a performance win.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task, finer grained
fingerprinting of a given Target.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the (partitioned) targets.
:rtype: InvalidationCheck
"""
# TODO(benjy): Compute locally_changed_targets here instead of passing it in? We currently pass
# it in because JvmCompile already has the source->target mapping for other reasons, and also
# to selectively enable this feature.
fingerprint_strategy = fingerprint_strategy or TaskIdentityFingerprintStrategy(self)
cache_manager = self.create_cache_manager(invalidate_dependents,
fingerprint_strategy=fingerprint_strategy)
# We separate locally-modified targets from others by coloring them differently.
# This can be a performance win, because these targets are more likely to be iterated
# over, and this preserves "chunk stability" for them.
colors = {}
# But we only do so if there aren't too many, or this optimization will backfire.
locally_changed_target_limit = 10
if locally_changed_targets and len(locally_changed_targets) < locally_changed_target_limit:
for t in targets:
if t in locally_changed_targets:
colors[t] = 'locally_changed'
else:
colors[t] = 'not_locally_changed'
invalidation_check = cache_manager.check(targets, partition_size_hint, colors, topological_order=topological_order)
if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
with self.context.new_workunit('cache'):
cached_vts, uncached_vts = \
self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
if cached_vts:
cached_targets = [vt.target for vt in cached_vts]
for t in cached_targets:
self.context.run_tracker.artifact_cache_stats.add_hit('default', t)
if not silent:
self._report_targets('Using cached artifacts for ', cached_targets, '.')
if uncached_vts:
uncached_targets = [vt.target for vt in uncached_vts]
for t in uncached_targets:
self.context.run_tracker.artifact_cache_stats.add_miss('default', t)
if not silent:
self._report_targets('No cached artifacts for ', uncached_targets, '.')
# Now that we've checked the cache, re-partition whatever is still invalid.
invalidation_check = \
InvalidationCheck(invalidation_check.all_vts, uncached_vts, partition_size_hint, colors)
if self.cache_target_dirs:
for vt in invalidation_check.all_vts:
vt.create_results_dir(os.path.join(self.workdir, vt.cache_key.hash))
if not silent:
targets = []
num_invalid_partitions = len(invalidation_check.invalid_vts_partitioned)
for vt in invalidation_check.invalid_vts_partitioned:
targets.extend(vt.targets)
if len(targets):
msg_elements = ['Invalidated ',
items_to_report_element([t.address.reference() for t in targets], 'target')]
if num_invalid_partitions > 1:
msg_elements.append(' in {} target partitions'.format(num_invalid_partitions))
msg_elements.append('.')
self.context.log.info(*msg_elements)
invalidation_report = self.context.invalidation_report
if invalidation_report:
for vts in invalidation_check.all_vts:
invalidation_report.add_vts(cache_manager, vts.targets, vts.cache_key, vts.valid,
phase='pre-check')
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
if invalidation_report:
for vts in invalidation_check.all_vts:
invalidation_report.add_vts(cache_manager, vts.targets, vts.cache_key, vts.valid,
phase='post-check')
for vt in invalidation_check.invalid_vts:
vt.update() # In case the caller doesn't update.
write_to_cache = (self.cache_target_dirs
and self.artifact_cache_writes_enabled()
and invalidation_check.invalid_vts)
if write_to_cache:
def result_files(vt):
return [os.path.join(vt.results_dir, f) for f in os.listdir(vt.results_dir)]
pairs = [(vt, result_files(vt)) for vt in invalidation_check.invalid_vts]
self.update_artifact_cache(pairs)
def check_artifact_cache_for(self, invalidation_check):
"""Decides which VTS to check the artifact cache for.
By default we check for each invalid target. Can be overridden, e.g., to
instead check only for a single artifact for the entire target set.
"""
return invalidation_check.invalid_vts
def check_artifact_cache(self, vts):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a pair (cached, uncached) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
return self.do_check_artifact_cache(vts)
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a pair (cached, uncached) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], []
cached_vts = []
uncached_vts = OrderedSet(vts)
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key) for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.append(vt)
uncached_vts.discard(vt)
elif isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
    # Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
def flatten(vts):
return list(itertools.chain.from_iterable([vt.versioned_targets for vt in vts]))
all_cached_vts, all_uncached_vts = flatten(cached_vts), flatten(uncached_vts)
if post_process_cached_vts:
post_process_cached_vts(all_cached_vts)
for vt in all_cached_vts:
vt.update()
return all_cached_vts, all_uncached_vts
def update_artifact_cache(self, vts_artifactfiles_pairs):
"""Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
"""
update_artifact_cache_work = self.get_update_artifact_cache_work(vts_artifactfiles_pairs)
if update_artifact_cache_work:
self.context.submit_background_work_chain([update_artifact_cache_work],
parent_workunit_name='cache')
def get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
"""Create a Work instance to update an artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
"""
cache = self._cache_factory.get_write_cache()
if cache:
if len(vts_artifactfiles_pairs) == 0:
return None
# Do some reporting.
targets = set()
for vts, _ in vts_artifactfiles_pairs:
targets.update(vts.targets)
self._report_targets('Caching artifacts for ', list(targets), '.')
always_overwrite = self._cache_factory.overwrite()
# Cache the artifacts.
args_tuples = []
for vts, artifactfiles in vts_artifactfiles_pairs:
overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))
return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
else:
return None
def _report_targets(self, prefix, targets, suffix):
self.context.log.info(
prefix,
items_to_report_element([t.address.reference() for t in targets], 'target'),
suffix)
def require_single_root_target(self):
"""If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
"""
target_roots = self.context.target_roots
if len(target_roots) == 0:
raise TaskError('No target specified.')
elif len(target_roots) > 1:
raise TaskError('Multiple targets specified: {}'
.format(', '.join([repr(t) for t in target_roots])))
return target_roots[0]
class Task(TaskBase):
"""An executable task.
Tasks form the atoms of work done by pants and when executed generally produce artifacts as a
side effect whether these be files on disk (for example compilation outputs) or characters output
to the terminal (for example dependency graph metadata).
"""
@abstractmethod
def execute(self):
"""Executes this task."""
class QuietTaskMixin(object):
"""A mixin to signal that pants shouldn't print verbose progress information for this task."""
pass
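# Usage sketch (illustrative only, not part of the original module): a minimal,
# hypothetical Task subclass showing the lifecycle described in the TaskBase
# docstring -- declare options, then do work inside an invalidated() block. The
# option name and the per-target work below are assumptions made for illustration.
class ExampleLoggingTask(Task):
  """Hypothetical example task: logs each target root that needs (re)work."""
  @classmethod
  def register_options(cls, register):
    super(ExampleLoggingTask, cls).register_options(register)
    register('--skip', action='store_true', default=False, help='Skip this example task.')
  def execute(self):
    if self.get_options().skip:
      return
    with self.invalidated(self.context.targets()) as invalidation_check:
      for vt in invalidation_check.invalid_vts:
        # Real work would go here; invalid targets are marked valid when the block exits.
        self.context.log.info('Would process {}'.format(vt.target.address.reference()))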
|
{
"content_hash": "9b1c96f7c80f95c9ca62ae35e84fd43a",
"timestamp": "",
"source": "github",
"line_count": 524,
"max_line_length": 119,
"avg_line_length": 43.29007633587786,
"alnum_prop": 0.6935284782225357,
"repo_name": "scode/pants",
"id": "a808e3d5ded0ea9645379d0fc9e12489d253cff1",
"size": "22831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/tasks/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "69479"
},
{
"name": "Java",
"bytes": "302900"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3788845"
},
{
"name": "Scala",
"bytes": "76623"
},
{
"name": "Shell",
"bytes": "49094"
},
{
"name": "Thrift",
"bytes": "2583"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import filingcabinet.models
class Migration(migrations.Migration):
dependencies = [
('document', '0003_auto_20180521_1619'),
]
operations = [
migrations.AddField(
model_name='pageannotation',
name='image',
field=models.ImageField(blank=True, max_length=255, upload_to=filingcabinet.models.get_page_annotation_filename),
),
]
|
{
"content_hash": "51ed68f734d6fb7cab35f5a497bb536b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 125,
"avg_line_length": 25.63157894736842,
"alnum_prop": 0.6509240246406571,
"repo_name": "stefanw/froide",
"id": "86d6b841e06ac17f3e5312594ebdba94ac36dfb1",
"size": "561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "froide/document/migrations/0004_pageannotation_image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17807"
},
{
"name": "HTML",
"bytes": "161162"
},
{
"name": "Java",
"bytes": "287939"
},
{
"name": "JavaScript",
"bytes": "1325034"
},
{
"name": "Makefile",
"bytes": "329"
},
{
"name": "Python",
"bytes": "1642783"
},
{
"name": "Shell",
"bytes": "1621"
}
],
"symlink_target": ""
}
|
"""
Copyright (C) 2016, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import datetime
from bennu.localdb import SQLite3Db
import mysql.connector as mysqlConnector
"""
Local database access
"""
#Findfine MySQL localdb
class LocalDbForJsonImporter:
    #Constructor
def __init__(self):
self.mysqlConnection = mysqlConnector.connect(host="localhost", database="findfine", user="findfine_db_root", password="asdfASDF1234")
    #Destructor
def __del__(self):
self.mysqlConnection.close()
    #Save trip data if it does not already exist
def upsertTrip(self, dicTripData=None):
        #Convert strUpdateTime to dtUpdateTime
if "strUpdateTime" in dicTripData.keys():
dicTripData["dtUpdateTime"] = datetime.datetime.strptime(dicTripData["strUpdateTime"], "%Y-%m-%d %H:%M:%S")
del dicTripData["strUpdateTime"]
        #Check for duplicates
queryCursor = self.mysqlConnection.cursor(buffered=True)
upsertCursor = self.mysqlConnection.cursor(buffered=True)
strQuerySql = ("SELECT * FROM trip_trip WHERE strOriginUrl=%(strOriginUrl)s")
queryCursor.execute(strQuerySql, dicTripData)
if queryCursor.rowcount == 0:
            #trip data keys
lstStrTripDataKey = list(dicTripData.keys())
            #INSERT field string
strTableField = ",".join(lstStrTripDataKey)
            #INSERT value string
lstStrTableValue = []
for strTripDataKey in lstStrTripDataKey:
lstStrTableValue.append("%%(%s)s"%strTripDataKey)
strTableValue = ",".join(lstStrTableValue)
strInsertSql = "INSERT INTO trip_trip (%s) VALUES (%s)"%(strTableField, strTableValue)
upsertCursor.execute(strInsertSql, dicTripData)
else:
            #trip data keys
lstStrTripDataKey = list(dicTripData.keys())
            #SET field string
lstStrSET = []
for strTripDataKey in lstStrTripDataKey:
if strTripDataKey != "strOriginUrl":
lstStrSET.append("%s=%%(%s)s"%(strTripDataKey, strTripDataKey))
strSET = ",".join(lstStrSET)
strUpdateSql = "UPDATE trip_trip SET %s WHERE strOriginUrl=%%(strOriginUrl)s"%strSET
upsertCursor.execute(strUpdateSql, dicTripData)
self.mysqlConnection.commit()
    #Mark trips of the given strSource as out-of-date
def setTripDataStatusAsOutOfDate(self, strSource=None):
updateCursor = self.mysqlConnection.cursor(buffered=True)
dicUpdateData = {
"strUpdateStatus":"out-of-date",
"strSource":strSource
}
strUpdateSql = "UPDATE trip_trip SET strUpdateStatus=%(strUpdateStatus)s WHERE strSource=%(strSource)s"
updateCursor.execute(strUpdateSql, dicUpdateData)
self.mysqlConnection.commit()
    #Insert or update exchange rate data
def upsertExRate(self, dicExRateData=None):
dicExRateData["dtUpdateTime"] = datetime.datetime.strptime(dicExRateData["strUpdateTime"], "%Y-%m-%d %H:%M:%S")
queryCursor = self.mysqlConnection.cursor(buffered=True)
upsertCursor = self.mysqlConnection.cursor(buffered=True)
strQuerySql = ("SELECT * FROM trip_exrate WHERE strCurrencyName=%(strCurrencyName)s")
queryCursor.execute(strQuerySql, dicExRateData)
if queryCursor.rowcount == 0:
strInsertSql = (
"INSERT INTO trip_exrate (strCurrencyName, fUSDollar, dtUpdateTime)"
"VALUES (%(strCurrencyName)s, %(fUSDollar)s, %(dtUpdateTime)s)"
)
upsertCursor.execute(strInsertSql, dicExRateData)
else:
strUpdateSql = (
"UPDATE trip_exrate SET fUSDollar=%(fUSDollar)s, dtUpdateTime=%(dtUpdateTime)s"
"WHERE strCurrencyName=%(strCurrencyName)s"
)
upsertCursor.execute(strUpdateSql, dicExRateData)
self.mysqlConnection.commit()
    #Clear trip data
def clearTripData(self):
deleteCursor = self.mysqlConnection.cursor(buffered=True)
strDeleteSql = ("DELETE FROM trip_favoritetrip")
deleteCursor.execute(strDeleteSql)
self.mysqlConnection.commit()
strDeleteSql = ("DELETE FROM trip_trip")
deleteCursor.execute(strDeleteSql)
self.mysqlConnection.commit()
    #Clear exchange rate data
def clearExRateData(self):
deleteCursor = self.mysqlConnection.cursor(buffered=True)
strDeleteSql = ("DELETE FROM trip_exrate")
deleteCursor.execute(strDeleteSql)
self.mysqlConnection.commit()
    #Clear test data (clear table)
def clearTestData(self):
deleteCursor = self.mysqlConnection.cursor(buffered=True)
strDeleteSql = ("DELETE FROM trip_trip")
deleteCursor.execute(strDeleteSql)
strDeleteSql = ("DELETE FROM trip_exrate")
deleteCursor.execute(strDeleteSql)
self.mysqlConnection.commit()
#KKDAY crawler localdb (SQLite3)
class LocalDbForKKDAY:
    #Constructor
def __init__(self):
self.db = SQLite3Db(strResFolderPath="findfine_crawler.resource")
self.initialDb()
    #Initialize the database
def initialDb(self):
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS kkday_product("
"id INTEGER PRIMARY KEY,"
"strProductUrl TEXT NOT NULL,"
"intCountryId INTEGER NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS kkday_country("
"id INTEGER PRIMARY KEY,"
"strCountryPage1Url TEXT NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
    #Save country if it does not already exist
def insertCountryIfNotExists(self, strCountryPage1Url=None):
strSQL = "SELECT * FROM kkday_country WHERE strCountryPage1Url='%s'"%strCountryPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO kkday_country VALUES(NULL, '%s', 0)"%strCountryPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get all country page-1 urls (for the given isGot status)
def fetchallCountryUrl(self, isGot=False):
dicIsGotCode = {True:"1", False:"0"}
strSQL = "SELECT strCountryPage1Url FROM kkday_country WHERE isGot=%s"%dicIsGotCode[isGot]
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrCountryPage1Url = []
for rowData in lstRowData:
lstStrCountryPage1Url.append(rowData["strCountryPage1Url"])
return lstStrCountryPage1Url
    #Get page-1 urls of all countries not yet downloaded
def fetchallNotObtainedCountryUrl(self):
return self.fetchallCountryUrl(isGot=False)
    #Get page-1 urls of all countries already downloaded
def fetchallCompletedObtainedCountryUrl(self):
return self.fetchallCountryUrl(isGot=True)
    #Mark country as downloaded
def updateCountryStatusIsGot(self, strCountryPage1Url=None):
strSQL = "UPDATE kkday_country SET isGot=1 WHERE strCountryPage1Url='%s'"%strCountryPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get country id
def fetchCountryIdByUrl(self, strCountryPage1Url=None):
strSQL = "SELECT * FROM kkday_country WHERE strCountryPage1Url='%s'"%strCountryPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
return lstRowData[0]["id"]
    #Save product URL if it does not already exist
def insertProductUrlIfNotExists(self, strProductUrl=None, strCountryPage1Url=None):
intCountryId = self.fetchCountryIdByUrl(strCountryPage1Url=strCountryPage1Url)
#insert product url if not exists
strSQL = "SELECT * FROM kkday_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO kkday_product VALUES(NULL, '%s', %d, 0)"%(strProductUrl, intCountryId)
self.db.commitSQL(strSQL=strSQL)
    #Get product urls of the given country
def fetchallProductUrlByCountryUrl(self, strCountryPage1Url=None):
intCountryId = self.fetchCountryIdByUrl(strCountryPage1Url=strCountryPage1Url)
strSQL = "SELECT * FROM kkday_product WHERE intCountryId=%d"%intCountryId
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Check whether the product has been downloaded
def checkProductIsGot(self, strProductUrl=None):
isGot = True
strSQL = "SELECT * FROM kkday_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
for rowData in lstRowData:
if rowData["isGot"] == 0:
isGot = False
return isGot
    #Mark product as downloaded
def updateProductStatusIsGot(self, strProductUrl=None):
strSQL = "UPDATE kkday_product SET isGot=1 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Get all product urls already downloaded
def fetchallCompletedObtainedProductUrl(self):
strSQL = "SELECT strProductUrl FROM kkday_product WHERE isGot=1"
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Mark product as not yet downloaded
def updateProductStatusIsNotGot(self, strProductUrl=None):
strSQL = "UPDATE kkday_product SET isGot=0 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Clear test data (clear table)
def clearTestData(self):
strSQL = "DELETE FROM kkday_product"
self.db.commitSQL(strSQL=strSQL)
strSQL = "DELETE FROM kkday_country"
self.db.commitSQL(strSQL=strSQL)
#KLOOK crawler localdb (SQLite3)
class LocalDbForKLOOK:
    #Constructor
def __init__(self):
self.db = SQLite3Db(strResFolderPath="findfine_crawler.resource")
self.initialDb()
    #Initialize the database
def initialDb(self):
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS klook_product("
"id INTEGER PRIMARY KEY,"
"strProductUrl TEXT NOT NULL,"
"intCityId INTEGER NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS klook_city("
"id INTEGER PRIMARY KEY,"
"strCityPage1Url TEXT NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
    #Save city if it does not already exist
def insertCityIfNotExists(self, strCityPage1Url=None):
strSQL = "SELECT * FROM klook_city WHERE strCityPage1Url='%s'"%strCityPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO klook_city VALUES(NULL, '%s', 0)"%strCityPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get all city page-1 urls (for the given isGot status)
def fetchallCityUrl(self, isGot=False):
dicIsGotCode = {True:"1", False:"0"}
strSQL = "SELECT strCityPage1Url FROM klook_city WHERE isGot=%s"%dicIsGotCode[isGot]
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrCityPage1Url = []
for rowData in lstRowData:
lstStrCityPage1Url.append(rowData["strCityPage1Url"])
return lstStrCityPage1Url
    #Get page-1 urls of all cities not yet downloaded
def fetchallNotObtainedCityUrl(self):
return self.fetchallCityUrl(isGot=False)
    #Get page-1 urls of all cities already downloaded
def fetchallCompletedObtainedCityUrl(self):
return self.fetchallCityUrl(isGot=True)
    #Mark city as downloaded
def updateCityStatusIsGot(self, strCityPage1Url=None):
strSQL = "UPDATE klook_city SET isGot=1 WHERE strCityPage1Url='%s'"%strCityPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get city id
def fetchCityIdByUrl(self, strCityPage1Url=None):
strSQL = "SELECT * FROM klook_city WHERE strCityPage1Url='%s'"%strCityPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
return lstRowData[0]["id"]
    #Save product URL if it does not already exist
def insertProductUrlIfNotExists(self, strProductUrl=None, strCityPage1Url=None):
intCityId = self.fetchCityIdByUrl(strCityPage1Url=strCityPage1Url)
#insert product url if not exists
strSQL = "SELECT * FROM klook_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO klook_product VALUES(NULL, '%s', %d, 0)"%(strProductUrl, intCityId)
self.db.commitSQL(strSQL=strSQL)
    #Get product urls of the given city
def fetchallProductUrlByCityUrl(self, strCityPage1Url=None):
intCityId = self.fetchCityIdByUrl(strCityPage1Url=strCityPage1Url)
strSQL = "SELECT * FROM klook_product WHERE intCityId=%d"%intCityId
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Check whether the product has been downloaded
def checkProductIsGot(self, strProductUrl=None):
isGot = True
strSQL = "SELECT * FROM klook_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
for rowData in lstRowData:
if rowData["isGot"] == 0:
isGot = False
return isGot
    #Mark product as downloaded
def updateProductStatusIsGot(self, strProductUrl=None):
strSQL = "UPDATE klook_product SET isGot=1 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Get all product urls already downloaded
def fetchallCompletedObtainedProductUrl(self):
strSQL = "SELECT strProductUrl FROM klook_product WHERE isGot=1"
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Mark product as not yet downloaded
def updateProductStatusIsNotGot(self, strProductUrl=None):
strSQL = "UPDATE klook_product SET isGot=0 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Clear test data (clear table)
def clearTestData(self):
strSQL = "DELETE FROM klook_product"
self.db.commitSQL(strSQL=strSQL)
strSQL = "DELETE FROM klook_city"
self.db.commitSQL(strSQL=strSQL)
#TRIPBAA crawler localdb (SQLite3)
class LocalDbForTRIPBAA:
    #Constructor
def __init__(self):
self.db = SQLite3Db(strResFolderPath="findfine_crawler.resource")
self.initialDb()
    #Initialize the database
def initialDb(self):
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS tripbaa_product("
"id INTEGER PRIMARY KEY,"
"strProductUrl TEXT NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
    #Save product URL if it does not already exist
def insertProductUrlIfNotExists(self, strProductUrl=None):
strSQL = "SELECT * FROM tripbaa_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO tripbaa_product VALUES(NULL, '%s', 0)"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Get product urls (isGot = False: not downloaded, True: downloaded)
def fetchallProductUrl(self, isGot=False):
dicIsGotCode = {True:"1", False:"0"}
strSQL = "SELECT strProductUrl FROM tripbaa_product WHERE isGot=%s"%dicIsGotCode.get(isGot, "0")
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Check whether the product has been downloaded
def checkProductIsGot(self, strProductUrl=None):
isGot = True
strSQL = "SELECT * FROM tripbaa_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
for rowData in lstRowData:
if rowData["isGot"] == 0:
isGot = False
return isGot
    #Mark product as downloaded
def updateProductStatusIsGot(self, strProductUrl=None):
strSQL = "UPDATE tripbaa_product SET isGot=1 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Mark product as not yet downloaded
def updateProductStatusIsNotGot(self, strProductUrl=None):
strSQL = "UPDATE tripbaa_product SET isGot=0 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Clear test data (clear table)
def clearTestData(self):
strSQL = "DELETE FROM tripbaa_product"
self.db.commitSQL(strSQL=strSQL)
#Voyagin crawler localdb (SQLite3)
class LocalDbForVOYAGIN:
    #Constructor
def __init__(self):
self.db = SQLite3Db(strResFolderPath="findfine_crawler.resource")
self.initialDb()
    #Initialize the database
def initialDb(self):
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS voyagin_product("
"id INTEGER PRIMARY KEY,"
"strProductUrl TEXT NOT NULL,"
"strLocation TEXT NOT NULL,"
"intDurationHour INTEGER NOT NULL,"
"intCountryId INTEGER NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS voyagin_country("
"id INTEGER PRIMARY KEY,"
"strCountryPage1Url TEXT NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
    #Save country if it does not already exist
def insertCountryIfNotExists(self, strCountryPage1Url=None):
strSQL = "SELECT * FROM voyagin_country WHERE strCountryPage1Url='%s'"%strCountryPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO voyagin_country VALUES(NULL, '%s', 0)"%strCountryPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get all country page-1 urls (for the given isGot status)
def fetchallCountryUrl(self, isGot=False):
dicIsGotCode = {True:"1", False:"0"}
strSQL = "SELECT strCountryPage1Url FROM voyagin_country WHERE isGot=%s"%dicIsGotCode[isGot]
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrCountryPage1Url = []
for rowData in lstRowData:
lstStrCountryPage1Url.append(rowData["strCountryPage1Url"])
return lstStrCountryPage1Url
    #Get page-1 urls of all countries not yet downloaded
def fetchallNotObtainedCountryUrl(self):
return self.fetchallCountryUrl(isGot=False)
    #Get page-1 urls of all countries already downloaded
def fetchallCompletedObtainedCountryUrl(self):
return self.fetchallCountryUrl(isGot=True)
    #Mark country as downloaded
def updateCountryStatusIsGot(self, strCountryPage1Url=None):
strSQL = "UPDATE voyagin_country SET isGot=1 WHERE strCountryPage1Url='%s'"%strCountryPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get country id
def fetchCountryIdByUrl(self, strCountryPage1Url=None):
strSQL = "SELECT * FROM voyagin_country WHERE strCountryPage1Url='%s'"%strCountryPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
return lstRowData[0]["id"]
    #Save product URL if it does not already exist
def insertProductUrlIfNotExists(self, strProductUrl=None, strLocation=None, intDurationHour=None, strCountryPage1Url=None):
intCountryId = self.fetchCountryIdByUrl(strCountryPage1Url=strCountryPage1Url)
#insert product url if not exists
strSQL = "SELECT * FROM voyagin_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO voyagin_product VALUES(NULL, '%s', '%s', %d, %d, 0)"%(strProductUrl, strLocation, intDurationHour, intCountryId)
self.db.commitSQL(strSQL=strSQL)
    #Get the location and duration hour of the given product
def fetchLocationAndDurationHourByProductUrl(self, strProductUrl=None):
strSQL = "SELECT * FROM voyagin_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
return (lstRowData[0]["strLocation"], lstRowData[0]["intDurationHour"])
    #Get product urls of the given country
def fetchallProductUrlByCountryUrl(self, strCountryPage1Url=None):
intCountryId = self.fetchCountryIdByUrl(strCountryPage1Url=strCountryPage1Url)
strSQL = "SELECT * FROM voyagin_product WHERE intCountryId=%d"%intCountryId
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Check whether the product has been downloaded
def checkProductIsGot(self, strProductUrl=None):
isGot = True
strSQL = "SELECT * FROM voyagin_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
for rowData in lstRowData:
if rowData["isGot"] == 0:
isGot = False
return isGot
    #Mark product as downloaded
def updateProductStatusIsGot(self, strProductUrl=None):
strSQL = "UPDATE voyagin_product SET isGot=1 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Get all product urls already downloaded
def fetchallCompletedObtainedProductUrl(self):
strSQL = "SELECT strProductUrl FROM voyagin_product WHERE isGot=1"
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Mark product as not yet downloaded
def updateProductStatusIsNotGot(self, strProductUrl=None):
strSQL = "UPDATE voyagin_product SET isGot=0 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Clear test data (clear table)
def clearTestData(self):
strSQL = "DELETE FROM voyagin_product"
self.db.commitSQL(strSQL=strSQL)
strSQL = "DELETE FROM voyagin_country"
self.db.commitSQL(strSQL=strSQL)
#GetYourGuide crawler localdb (SQLite3)
class LocalDbForGYG:
    #Constructor
def __init__(self):
self.db = SQLite3Db(strResFolderPath="findfine_crawler.resource")
self.initialDb()
    #Initialize the database
def initialDb(self):
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS gyg_product("
"id INTEGER PRIMARY KEY,"
"strProductUrl TEXT NOT NULL,"
"intCityId INTEGER NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
strSQLCreateTable = (
"CREATE TABLE IF NOT EXISTS gyg_city("
"id INTEGER PRIMARY KEY,"
"strCityPage1Url TEXT NOT NULL,"
"isGot BOOLEAN NOT NULL)"
)
self.db.commitSQL(strSQL=strSQLCreateTable)
    #Save city if it does not already exist
def insertCityIfNotExists(self, strCityPage1Url=None):
strSQL = "SELECT * FROM gyg_city WHERE strCityPage1Url='%s'"%strCityPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO gyg_city VALUES(NULL, '%s', 0)"%strCityPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get all city page-1 urls (for the given isGot status)
def fetchallCityUrl(self, isGot=False):
dicIsGotCode = {True:"1", False:"0"}
strSQL = "SELECT strCityPage1Url FROM gyg_city WHERE isGot=%s"%dicIsGotCode[isGot]
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrCityPage1Url = []
for rowData in lstRowData:
lstStrCityPage1Url.append(rowData["strCityPage1Url"])
return lstStrCityPage1Url
    #Get page-1 urls of all cities not yet downloaded
def fetchallNotObtainedCityUrl(self):
return self.fetchallCityUrl(isGot=False)
    #Get page-1 urls of all cities already downloaded
def fetchallCompletedObtainedCityUrl(self):
return self.fetchallCityUrl(isGot=True)
    #Mark city as downloaded
def updateCityStatusIsGot(self, strCityPage1Url=None):
strSQL = "UPDATE gyg_city SET isGot=1 WHERE strCityPage1Url='%s'"%strCityPage1Url
self.db.commitSQL(strSQL=strSQL)
    #Get city id
def fetchCityIdByUrl(self, strCityPage1Url=None):
strSQL = "SELECT * FROM gyg_city WHERE strCityPage1Url='%s'"%strCityPage1Url
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
return lstRowData[0]["id"]
    #Save product URL if it does not already exist
def insertProductUrlIfNotExists(self, strProductUrl=None, strCityPage1Url=None):
intCityId = self.fetchCityIdByUrl(strCityPage1Url=strCityPage1Url)
#insert product url if not exists
strSQL = "SELECT * FROM gyg_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
if len(lstRowData) == 0:
strSQL = "INSERT INTO gyg_product VALUES(NULL, '%s', %d, 0)"%(strProductUrl, intCityId)
self.db.commitSQL(strSQL=strSQL)
    #Get product urls of the given city
def fetchallProductUrlByCityUrl(self, strCityPage1Url=None):
intCityId = self.fetchCityIdByUrl(strCityPage1Url=strCityPage1Url)
strSQL = "SELECT * FROM gyg_product WHERE intCityId=%d"%intCityId
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Check whether the product has been downloaded
def checkProductIsGot(self, strProductUrl=None):
isGot = True
strSQL = "SELECT * FROM gyg_product WHERE strProductUrl='%s'"%strProductUrl
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
for rowData in lstRowData:
if rowData["isGot"] == 0:
isGot = False
return isGot
    #Mark product as downloaded
def updateProductStatusIsGot(self, strProductUrl=None):
strSQL = "UPDATE gyg_product SET isGot=1 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Get all product urls already downloaded
def fetchallCompletedObtainedProductUrl(self):
strSQL = "SELECT strProductUrl FROM gyg_product WHERE isGot=1"
lstRowData = self.db.fetchallSQL(strSQL=strSQL)
lstStrProductUrl = []
for rowData in lstRowData:
lstStrProductUrl.append(rowData["strProductUrl"])
return lstStrProductUrl
    #Mark product as not yet downloaded
def updateProductStatusIsNotGot(self, strProductUrl=None):
strSQL = "UPDATE gyg_product SET isGot=0 WHERE strProductUrl='%s'"%strProductUrl
self.db.commitSQL(strSQL=strSQL)
    #Clear test data (clear table)
def clearTestData(self):
strSQL = "DELETE FROM gyg_product"
self.db.commitSQL(strSQL=strSQL)
strSQL = "DELETE FROM gyg_city"
self.db.commitSQL(strSQL=strSQL)
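#Usage sketch (illustrative only, not part of the original module): a minimal,
#hedged example of the typical crawler workflow with one of the SQLite-backed
#helpers above. The example URLs are placeholder assumptions, not real endpoints.
if __name__ == "__main__":
    exampleDb = LocalDbForKKDAY()
    exampleDb.insertCountryIfNotExists(strCountryPage1Url="https://example.com/country/page1")
    for strCountryUrl in exampleDb.fetchallNotObtainedCountryUrl():
        exampleDb.insertProductUrlIfNotExists(strProductUrl="https://example.com/product/1", strCountryPage1Url=strCountryUrl)
        exampleDb.updateCountryStatusIsGot(strCountryPage1Url=strCountryUrl)
    print(exampleDb.fetchallCompletedObtainedCountryUrl())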
|
{
"content_hash": "eb53a83452ef082470312e58bf280bd1",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 146,
"avg_line_length": 41.68929110105581,
"alnum_prop": 0.6521707670043415,
"repo_name": "muchu1983/findfine",
"id": "35949f8b487ebd580de09275d2a95bee711bc531",
"size": "28904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "findfine_crawler/localdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54878"
},
{
"name": "HTML",
"bytes": "54872"
},
{
"name": "JavaScript",
"bytes": "19293"
},
{
"name": "Python",
"bytes": "56519"
}
],
"symlink_target": ""
}
|
"""
Tests for the circle module.
"""
import astropy.units as u
import numpy as np
import pytest
from astropy.coordinates import SkyCoord
from numpy.testing import assert_allclose
from photutils.aperture.circle import (CircularAnnulus, CircularAperture,
SkyCircularAnnulus, SkyCircularAperture)
from photutils.aperture.tests.test_aperture_common import BaseTestAperture
from photutils.utils._optional_deps import HAS_MATPLOTLIB
POSITIONS = [(10, 20), (30, 40), (50, 60), (70, 80)]
RA, DEC = np.transpose(POSITIONS)
SKYCOORD = SkyCoord(ra=RA, dec=DEC, unit='deg')
UNIT = u.arcsec
RADII = (0.0, -1.0, -np.inf)
class TestCircularAperture(BaseTestAperture):
aperture = CircularAperture(POSITIONS, r=3.)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason='matplotlib is required')
def test_plot(self):
self.aperture.plot()
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason='matplotlib is required')
def test_plot_returns_patches(self):
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
my_patches = self.aperture.plot()
assert isinstance(my_patches, list)
for patch in my_patches:
assert isinstance(patch, Patch)
# test creating a legend with these patches
plt.legend(my_patches, list(range(len(my_patches))))
@staticmethod
@pytest.mark.parametrize('radius', RADII)
def test_invalid_params(radius):
with pytest.raises(ValueError):
CircularAperture(POSITIONS, radius)
def test_copy_eq(self):
aper = self.aperture.copy()
assert aper == self.aperture
aper.r = 2.
assert aper != self.aperture
class TestCircularAnnulus(BaseTestAperture):
aperture = CircularAnnulus(POSITIONS, r_in=3., r_out=7.)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason='matplotlib is required')
def test_plot(self):
self.aperture.plot()
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason='matplotlib is required')
def test_plot_returns_patches(self):
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
my_patches = self.aperture.plot()
assert isinstance(my_patches, list)
for p in my_patches:
assert isinstance(p, Patch)
# make sure I can create a legend with these patches
labels = list(range(len(my_patches)))
plt.legend(my_patches, labels)
@staticmethod
@pytest.mark.parametrize('radius', RADII)
def test_invalid_params(radius):
with pytest.raises(ValueError):
CircularAnnulus(POSITIONS, r_in=radius, r_out=7.)
with pytest.raises(ValueError):
CircularAnnulus(POSITIONS, r_in=3., r_out=radius)
def test_copy_eq(self):
aper = self.aperture.copy()
assert aper == self.aperture
aper.r_in = 2.
assert aper != self.aperture
class TestSkyCircularAperture(BaseTestAperture):
aperture = SkyCircularAperture(SKYCOORD, r=3. * UNIT)
@staticmethod
@pytest.mark.parametrize('radius', RADII)
def test_invalid_params(radius):
with pytest.raises(ValueError):
SkyCircularAperture(SKYCOORD, r=radius * UNIT)
def test_copy_eq(self):
aper = self.aperture.copy()
assert aper == self.aperture
aper.r = 2. * UNIT
assert aper != self.aperture
class TestSkyCircularAnnulus(BaseTestAperture):
aperture = SkyCircularAnnulus(SKYCOORD, r_in=3. * UNIT, r_out=7. * UNIT)
@staticmethod
@pytest.mark.parametrize('radius', RADII)
def test_invalid_params(radius):
with pytest.raises(ValueError):
SkyCircularAnnulus(SKYCOORD, r_in=radius * UNIT, r_out=7. * UNIT)
with pytest.raises(ValueError):
SkyCircularAnnulus(SKYCOORD, r_in=3. * UNIT, r_out=radius * UNIT)
def test_copy_eq(self):
aper = self.aperture.copy()
assert aper == self.aperture
aper.r_in = 2. * UNIT
assert aper != self.aperture
def test_slicing():
xypos = [(10, 10), (20, 20), (30, 30)]
aper1 = CircularAperture(xypos, r=3)
aper2 = aper1[0:2]
assert len(aper2) == 2
aper3 = aper1[0]
assert aper3.isscalar
with pytest.raises(TypeError):
len(aper3)
with pytest.raises(TypeError):
_ = aper3[0]
def test_area_overlap():
data = np.ones((11, 11))
xypos = [(0, 0), (5, 5), (50, 50)]
aper = CircularAperture(xypos, r=3)
areas = aper.area_overlap(data)
assert_allclose(areas, [10.304636, np.pi * 9., np.nan])
data2 = np.ones((11, 11)) * u.Jy
areas = aper.area_overlap(data2)
assert not isinstance(areas[0], u.Quantity)
assert_allclose(areas, [10.304636, np.pi * 9., np.nan])
aper2 = CircularAperture(xypos[1], r=3)
area2 = aper2.area_overlap(data)
assert_allclose(area2, np.pi * 9.)
area2 = aper2.area_overlap(data2)
assert not isinstance(area2, u.Quantity)
assert_allclose(area2, np.pi * 9.)
def test_area_overlap_mask():
data = np.ones((11, 11))
mask = np.zeros((11, 11), dtype=bool)
mask[0, 0:2] = True
mask[5, 5:7] = True
xypos = [(0, 0), (5, 5), (50, 50)]
aper = CircularAperture(xypos, r=3)
areas = aper.area_overlap(data, mask=mask)
areas_exp = np.array([10.304636, np.pi * 9., np.nan]) - 2.
assert_allclose(areas, areas_exp)
with pytest.raises(ValueError):
mask = np.zeros((3, 3), dtype=bool)
aper.area_overlap(data, mask=mask)
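# Usage sketch (illustrative only, not part of the original test module): a minimal
# example of the CircularAperture overlap-area API exercised by the tests above;
# the array size, position, and radius are assumptions for illustration.
if __name__ == '__main__':
    demo_data = np.ones((11, 11))
    demo_aper = CircularAperture([(5, 5)], r=3.)
    # a fully enclosed r=3 aperture overlaps ~pi * 9 pixels of the unit image
    print(demo_aper.area_overlap(demo_data))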
|
{
"content_hash": "89ac8874f8af2d3e2413cf73f01ffabb",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6434782608695652,
"repo_name": "astropy/photutils",
"id": "33e1150809980379f83f6ce94d31a929ebc9ee8e",
"size": "5584",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "photutils/aperture/tests/test_circle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "32106"
},
{
"name": "Python",
"bytes": "1447767"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
url(r'^password_change/$', auth_views.password_change, name='password_change'),
url(r'^password_change/done/$', auth_views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete')
]
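# Usage sketch (illustrative only, not part of the original file): these auth URL
# patterns are typically mounted from the project-level URLconf under a prefix,
# e.g. url(r'^accounts/', include('accounts.urls')). The 'accounts.urls' module
# path and the '/accounts/' prefix are assumptions for illustration.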
|
{
"content_hash": "6d202e090df1890639724301fa6d9a6b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 162,
"avg_line_length": 66.23076923076923,
"alnum_prop": 0.6782810685249709,
"repo_name": "meiordac/ecommerce",
"id": "5f2afcd183e2d72944047dedd18da54ef1966c1b",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3408"
},
{
"name": "HTML",
"bytes": "26192"
},
{
"name": "JavaScript",
"bytes": "1919"
},
{
"name": "Python",
"bytes": "31513"
}
],
"symlink_target": ""
}
|
import subprocess
import os,time
from HPC.Logger import Logger
from HPC.HPCManagerPlugins.plugin import Plugin
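# NOTE: runcommand() is called below but not imported in this module; in the original
# package it is assumed to be provided elsewhere. A minimal sketch of such a helper,
# using the subprocess module imported above and returning the (status, output, error)
# tuple the callers expect, is given here as an assumption.
def runcommand(cmd):
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True)
    stdout, stderr = process.communicate()
    return process.returncode, stdout, stderr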
class slurm(Plugin):
def __init__(self, logFileName):
self.__log= Logger(logFileName)
self.__failedPollTimes = 0
def getHPCResources(self, partition, max_nodes=None, min_nodes=2, min_walltime_m=30):
# copied from RunJobEdison
#cmd = 'showbf -p %s' % partition
cmd = 'sinfo '
self.__log.info("Executing command: '%s'" % cmd)
res_tuple = runcommand(cmd)
self.__log.info("Executing command output: %s" % str(res_tuple))
showbf_str = ""
if res_tuple[0] == 0:
showbf_str = res_tuple[1]
res = {}
self.__log.info("Available resources in %s partition" % partition)
self.__log.info(showbf_str)
if showbf_str:
shobf_out = showbf_str.splitlines()
self.__log.info("Fitted resources")
for l in shobf_out[2:]:
d = l.split()
nodes = int(d[2])
if nodes < int(min_nodes):
continue
if not d[3] == 'INFINITY':
wal_time_arr = d[3].split(":")
if len(wal_time_arr) < 4:
wal_time_sec = int(wal_time_arr[0])*(60*60) + int(wal_time_arr[1])*60 + int(wal_time_arr[2])
if wal_time_sec > 24 * 3600:
wal_time_sec = 24 * 3600
else:
wal_time_sec = 24 * 3600
#if nodes > 1:
# nodes = nodes - 1
else:
wal_time_sec = 12 * 3600
# Fitting Hopper policy
# https://www.nersc.gov/users/computational-systems/hopper/running-jobs/queues-and-policies/
nodes = max_nodes if nodes > max_nodes else nodes
if nodes < 682 and wal_time_sec > 48 * 3600:
wal_time_sec = 48 * 3600
elif nodes < 4096 and wal_time_sec > 36 * 3600:
wal_time_sec = 36 * 3600
elif nodes < 5462 and wal_time_sec > 12 * 3600:
wal_time_sec = 12 * 3600
elif wal_time_sec > 12 * 3600:
wal_time_sec = 12 * 3600
if wal_time_sec < int(min_walltime_m) * 60:
continue
self.__log.info("Nodes: %s, Walltime (str): %s, Walltime (min) %s" % (nodes, d[3], wal_time_sec/60 ))
res.update({nodes:wal_time_sec})
else:
self.__log.info("No availble resources. Default values will be used.")
self.__log.info("Get resources: %s" % res)
return res
def submitJob(self, globalWorkingDir, globalYodaDir, localWorkingDir, queue, repo, mppwidth, mppnppn, walltime, nodes, localSetup=None, cpuPerNode=None, dumpEventOutputs=False):
submit_script = "#!/bin/bash -l" + "\n"
if queue == 'premium':
submit_script += "#SBATCH -p regular\n"
submit_script += "#SBATCH --qos=premium\n"
elif queue == "scavenger":
submit_script += "#SBATCH -p regular\n"
submit_script += "#SBATCH --qos=scavenger\n"
elif queue == "low":
submit_script += "#SBATCH -p regular\n"
submit_script += "#SBATCH --qos=low\n"
else:
submit_script += "#SBATCH -p " + queue + "\n"
if repo:
submit_script += "#SBATCH -A " + repo + "\n"
# submit_script += "#SBATCH -n " + str(mppwidth) + "\n"
submit_script += "#SBATCH -N " + str(nodes) + "\n"
submit_script += "#SBATCH --signal=SIGUSR1@60\n"
submit_script += "#SBATCH -t " + walltime + "\n"
submit_script += "#SBATCH --ntasks-per-node=1\n"
submit_script += "#SBATCH --cpus-per-task=" + str(cpuPerNode) + "\n"
submit_script += "#SBATCH -J ES_job" + "\n"
submit_script += "#SBATCH -o athena_stdout.txt" + "\n"
submit_script += "#SBATCH -e athena_stderr.txt" + "\n"
submit_script += "cd $SBATCH_O_WORKDIR" + "\n"
submit_script += "module load mpi4py" + "\n"
if localSetup:
submit_script += localSetup + "\n"
#submit_script += "source /project/projectdirs/atlas/sw/python-yampl/setup.sh" + "\n"
#submit_script += "export PYTHONPATH=/project/projectdirs/atlas/sw/python-yampl/python-yampl/1.0/lib.linux-x86_64-2.6:$PYTHONPATH" + "\n"
submit_script += "export PYTHONPATH=%s:$PYTHONPATH\n" % globalWorkingDir
#submit_script += "export PYTHONPATH=/project/projectdirs/atlas/pilot/grid_env/boto/lib/python2.6/site-packages:$PYTHONPATH\n"
#submit_script += "export PYTHONPATH=/project/projectdirs/atlas/pilot/grid_env/external:$PYTHONPATH\n"
#submit_script += "export LD_LIBRARY_PATH=/project/projectdirs/atlas/sw/python-yampl/yampl/1.0/lib:$LD_LIBRARY_PATH" + "\n"
#submit_script += "export X509_USER_PROXY=/global/homes/w/wguan/x509up_u23959" + "\n"
#submit_script += "export X509_CERT_DIR=/project/projectdirs/atlas/pilot/grid_env/external/grid-security/certificates" + "\n"
submit_script += "env" + "\n"
# submit_script += "module avail" + "\n"
# submit_script += "module list" + "\n"
#submit_script += "srun -n " + str(nodes) + " -N " + str(mppnppn) + " python-mpi " + os.path.join(globalWorkingDir, "HPC/HPCJob.py") + " --globalWorkingDir="+globalYodaDir+" --localWorkingDir="+localWorkingDir+""
submit_script += "srun -N " + str(nodes) + " python-mpi " + os.path.join(globalWorkingDir, "HPC/HPCJob.py") + " --globalWorkingDir="+globalYodaDir+" --localWorkingDir="+localWorkingDir
if dumpEventOutputs:
submit_script += " --dumpEventOutputs"
###cmd = "mpiexec -n 2 python " + os.path.join(self.__globalWorkingDir, "HPC/HPCJob.py") + " --globalWorkingDir="+self.__globalWorkingDir+" --localWorkingDir="+self.__localWorkingDir+"&"
self.__submit_file = os.path.join(globalYodaDir, 'submit_script')
handle = open(self.__submit_file, 'w')
handle.write(submit_script)
handle.close()
self.__log.info("submit script:\n%s" % submit_script)
cmd = "sbatch " + self.__submit_file
self.__log.info("submitting HPC job: %s" % cmd)
status, output = runcommand(cmd)
self.__log.info("submitting HPC job: (status: %s, output: %s)" %(status, output))
self.__jobid = None
if status == 0:
self.__jobid = output.replace("\n", "").split(" ")[-1]
return 0, self.__jobid
return -1, None
def poll(self, jobid):
# poll the job in HPC. update it
cmd = "scontrol show job " + jobid
self.__log.info("polling HPC job: %s" % cmd)
status, output = runcommand(cmd)
# self.__log.info("polling HPC job: (status: %s, output: %s)" %(status, output))
if status == 0:
self.__failedPollTimes = 0
state = None
lines = output.split("\n")
for line in lines:
line = line.strip()
if line.startswith('JobState'):
state = line.split(" ")[0].split("=")[1]
if state == "COMPLETED":
self.__log.info("HPC job complete")
return "Complete"
if state == "RUNNING":
self.__log.info("HPC job is running")
return "Running"
if state == "PENDING":
self.__log.info("HPC job is pending")
return "Queue"
if state == "FAILED":
self.__log.info("HPC job is failed")
return "Failed"
if state == "CANCELLED":
self.__log.info("HPC job is cancelled")
return "Failed"
if state == "TIMEOUT":
self.__log.info("HPC job is timed out")
return "Failed"
self.__log.info("HPC job is in unknown state")
return 'Unknown'
else:
self.__log.info("polling HPC job: (status: %s, output: %s)" %(status, output))
if 'Invalid job id specified' in output:
self.__log.info("Unknown Job Id. Set Job Complete.")
return "Complete"
else:
self.__failedPollTimes += 1
self.__log.error('Failing HPC job because the polling command has failed ' + str(self.__failedPollTimes) + ' times.')
return 'Unknown'
return 'Unknown'
def delete(self, jobid):
command = "scancel " + jobid
status, output = runcommand(command)
self.__log.debug("Run Command: %s " % command)
self.__log.debug("Status: %s, Output: %s" % (status, output))
def runcommand(cmd):
p = subprocess.Popen(cmd.split(),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
while p.poll() is None:
time.sleep(1)
stdout,stderr = p.communicate()
return (p.returncode,stdout)
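# Hedged usage sketch, not part of the plugin above: roughly how a manager
# might submit and then poll a job. Every path, the queue name, the walltime
# and the core count below are illustrative assumptions.
if __name__ == '__main__':
    plugin = slurm('slurm_plugin.log')
    status, jobid = plugin.submitJob(
        globalWorkingDir='/path/to/pilot', globalYodaDir='/path/to/yoda',
        localWorkingDir='/scratch/yoda', queue='regular', repo=None,
        mppwidth=32, mppnppn=1, walltime='02:00:00', nodes=2,
        localSetup='echo "local setup"', cpuPerNode=32)
    if status == 0:
        print(plugin.poll(jobid))  # 'Queue', 'Running', 'Complete', 'Failed' or 'Unknown'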
|
{
"content_hash": "ac3dc514406e97171617ac48b52381fa",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 220,
"avg_line_length": 46.6051282051282,
"alnum_prop": 0.5390625,
"repo_name": "PanDAWMS/pilot",
"id": "b54a6c2fd6ef8dcd5499af4ad65d0d2954eab3b5",
"size": "9089",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "HPC/HPCManagerPlugins/slurm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4990965"
},
{
"name": "Shell",
"bytes": "23530"
}
],
"symlink_target": ""
}
|
import datetime
from scrapy import log
def string_strip(text, loader_context):
chars = loader_context.get('string_strip', ' \n\t\r')
return text.strip(chars)
def pre_string(text, loader_context):
pre_str = loader_context.get('pre_string', '')
return pre_str + text
def post_string(text, loader_context):
post_str = loader_context.get('post_string', '')
return text + post_str
def pre_url(text, loader_context):
pre_url = loader_context.get('pre_url', '')
if(pre_url[0:7] == 'http://' and text[0:7] == 'http://'):
return text
if(pre_url[-1:] == '/' and text[0:1] == '/'):
pre_url = pre_url[:-1]
return pre_url + text
def replace(text, loader_context):
replace = loader_context.get('replace', '')
return replace
def static(text, loader_context):
static = loader_context.get('static', '')
return static
def date(text, loader_context):
cformat = loader_context.get('date')
try:
if text.lower() in ['gestern', 'yesterday',]:
date = datetime.date.today() - datetime.timedelta(1)
elif text.lower() in ['heute', 'today',]:
date = datetime.date.today()
elif text.lower() in ['morgen', 'tomorrow',]:
date = datetime.date.today() + datetime.timedelta(1)
else:
date = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Date could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return date.strftime('%Y-%m-%d')
def time(text, loader_context):
cformat = loader_context.get('time')
try:
time = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Time could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return time.strftime('%H:%M:%S')
def ts_to_date(ts_str, loader_context):
try:
ts_int = int(ts_str)
return datetime.datetime.fromtimestamp(ts_int).strftime('%Y-%m-%d')
except ValueError:
loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
return None
def ts_to_time(ts_str, loader_context):
try:
ts_int = int(ts_str)
return datetime.datetime.fromtimestamp(ts_int).strftime('%H:%M:%S')
except ValueError:
loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
return None
def _breakdown_time_unit_overlap(time_str, limit):
time_list = time_str.split(':')
first = int(time_list[0])
if first >= limit:
time_list[0] = str(first % limit)
time_list.insert(0, str(first // limit))
else:
if(len(time_list[0]) == 1):
time_list[0] = '0' + time_list[0]
time_list.insert(0, '00')
time_str = ':'.join(time_list)
return time_str
def duration(text, loader_context):
cformat = loader_context.get('duration')
#Value completion in special cases
text_int = None
try:
text_int = int(text)
except ValueError:
pass
if(cformat == '%H:%M'):
if text_int:
text += ':00'
if(cformat == '%M'):
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%H:%M'
if(cformat == '%M:%S'):
if text_int:
text += ':00'
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%H:%M:%S'
if(cformat == '%S'):
if text_int:
if text_int >= 3600:
hours_str = str(text_int / 3600) + ':'
secs_under_hour_str = str(text_int % 3600)
text = hours_str + _breakdown_time_unit_overlap(secs_under_hour_str, 60)
cformat = '%H:%M:%S'
else:
text = _breakdown_time_unit_overlap(text, 60)
cformat = '%M:%S'
try:
duration = datetime.datetime.strptime(text, cformat)
except ValueError:
loader_context.get('spider').log('Duration could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
return None
return duration.strftime('%H:%M:%S')
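# Illustrative calls, not part of the module: each processor receives the
# extracted text plus a loader_context mapping holding its format string or
# auxiliary value (normally supplied by the scraper's item loader). The
# inputs below are made up.
if __name__ == '__main__':
    print(pre_url('item/42', {'pre_url': 'http://example.com/'}))  # http://example.com/item/42
    print(date('04.12.2011', {'date': '%d.%m.%Y'}))                # 2011-12-04
    print(duration('90', {'duration': '%M'}))                      # 01:30:00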
|
{
"content_hash": "ea7beb672df213fb92a2027b93d0afda",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 130,
"avg_line_length": 30.956204379562045,
"alnum_prop": 0.5722706908747937,
"repo_name": "sundisee/django-dynamic-scraper",
"id": "29ceef2c00c74c318dc008c67ca778561801a146",
"size": "4241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_scraper/utils/processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7060"
},
{
"name": "Python",
"bytes": "184514"
},
{
"name": "Shell",
"bytes": "4120"
}
],
"symlink_target": ""
}
|
import operator
from rply import LexerGenerator, ParserGenerator
from .utils import BoxInt
class TestBoth(object):
def test_arithmetic(self):
lg = LexerGenerator()
lg.add("NUMBER", r"\d+")
lg.add("PLUS", r"\+")
lg.add("TIMES", r"\*")
pg = ParserGenerator(["NUMBER", "PLUS", "TIMES"], precedence=[
("left", ["PLUS"]),
("left", ["TIMES"]),
])
@pg.production("main : expr")
def main(p):
return p[0]
@pg.production("expr : expr PLUS expr")
@pg.production("expr : expr TIMES expr")
def expr_binop(p):
return BoxInt({
"+": operator.add,
"*": operator.mul
}[p[1].getstr()](p[0].getint(), p[2].getint()))
@pg.production("expr : NUMBER")
def expr_num(p):
return BoxInt(int(p[0].getstr()))
lexer = lg.build()
parser = pg.build()
assert parser.parse(lexer.lex("3*4+5"))
|
{
"content_hash": "71a9621577411b69275694129218f831",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 25.76923076923077,
"alnum_prop": 0.4975124378109453,
"repo_name": "agamdua/rply",
"id": "38c8ad2763a44158ca9e2d95f80205d9c3eb6120",
"size": "1005",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_both.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70400"
},
{
"name": "Shell",
"bytes": "361"
}
],
"symlink_target": ""
}
|
import mock
from six.moves.urllib import parse
from heat.engine import resource
from heat.engine.resources.openstack.keystone import region
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import fakes
from heat.tests import utils
KEYSTONE_REGION_TEMPLATE = {
'heat_template_version': '2015-10-15',
'resources': {
'test_region': {
'type': 'OS::Keystone::Region',
'properties': {
'id': 'test_region_1',
'description': 'Test region',
'parent_region': 'default_region',
'enabled': 'True'
}
}
}
}
RESOURCE_TYPE = 'OS::Keystone::Region'
class KeystoneRegionTest(common.HeatTestCase):
def setUp(self):
super(KeystoneRegionTest, self).setUp()
self.ctx = utils.dummy_context()
self.stack = stack.Stack(
self.ctx, 'test_stack_keystone',
template.Template(KEYSTONE_REGION_TEMPLATE)
)
self.test_region = self.stack['test_region']
# Mock client
self.keystoneclient = mock.Mock()
self.patchobject(resource.Resource, 'client',
return_value=fakes.FakeKeystoneClient(
client=self.keystoneclient))
self.regions = self.keystoneclient.regions
keystone_client_plugin = mock.MagicMock()
self.test_region.client_plugin = mock.MagicMock()
self.test_region.client_plugin.return_value = keystone_client_plugin
def _get_mock_region(self):
value = mock.MagicMock()
region_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
value.id = region_id
return value
def test_region_handle_create(self):
mock_region = self._get_mock_region()
self.regions.create.return_value = mock_region
# validate the properties
self.assertEqual(
'test_region_1',
self.test_region.properties.get(region.KeystoneRegion.ID))
self.assertEqual(
'Test region',
self.test_region.properties.get(
region.KeystoneRegion.DESCRIPTION))
self.assertEqual(
'default_region',
self.test_region.properties.get(
region.KeystoneRegion.PARENT_REGION))
self.assertEqual(
True,
self.test_region.properties.get(region.KeystoneRegion.ENABLED))
self.test_region.handle_create()
# validate region creation
self.regions.create.assert_called_once_with(
id=parse.quote('test_region_1'),
description='Test region',
parent_region='default_region',
enabled=True)
# validate physical resource id
self.assertEqual(mock_region.id, self.test_region.resource_id)
def test_region_handle_create_minimal(self):
values = {
'description': 'sample region',
'enabled': True,
'parent_region': None,
'id': None
}
def _side_effect(key):
return values[key]
mock_region = self._get_mock_region()
self.regions.create.return_value = mock_region
self.test_region.properties = mock.MagicMock()
self.test_region.properties.__getitem__.side_effect = _side_effect
self.test_region.handle_create()
self.regions.create.assert_called_once_with(
id=None,
description='sample region',
parent_region=None,
enabled=True)
def test_region_handle_update(self):
self.test_region.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {region.KeystoneRegion.DESCRIPTION:
'Test Region updated',
region.KeystoneRegion.ENABLED: False,
region.KeystoneRegion.PARENT_REGION: 'test_parent_region'}
self.test_region.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.regions.update.assert_called_once_with(
region=self.test_region.resource_id,
description=prop_diff[region.KeystoneRegion.DESCRIPTION],
enabled=prop_diff[region.KeystoneRegion.ENABLED],
parent_region='test_parent_region'
)
|
{
"content_hash": "d0124c340fc9bfca56424962180fd8b7",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 32.91044776119403,
"alnum_prop": 0.5934240362811791,
"repo_name": "cwolferh/heat-scratch",
"id": "a156f3bd00b14016682429f1feed450e45781870",
"size": "4985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/keystone/test_region.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
}
|
from pip.req import parse_requirements
from pip.download import PipSession
from setuptools import setup
long_desc = """Poor Tux needs a new heart. We do that by feeding it a raspberrypi.
The end goal is to keep Tux's basic functionality:
- Wings position detection, push buttons and movement
- Mouth movement and position detection
- Eyes position detection, photodetector and lights
- Head button
- Speaker and microphone
- Volume button
"""
session = PipSession()
packages = [
'tuxeatpi_common',
]
install_reqs = parse_requirements('requirements.txt', session=session)
test_reqs = parse_requirements('test_requirements.txt', session=session)
setup(
name='tuxeatpi_common',
version='0.0.1',
packages=packages,
description="""New TuxDroid heart powered by Raspberry pi""",
long_description=long_desc,
author="TuxEatPi Team",
# TODO create team mail
author_email='titilambert@gmail.com',
url="https://github.com/TuxEatPi/common",
download_url="https://github.com/TuxEatPi/common/archive/0.0.1.tar.gz",
package_data={'': ['LICENSE.txt']},
package_dir={'tuxeatpi_common': 'tuxeatpi_common'},
include_package_data=True,
license='Apache 2.0',
classifiers=(
'Programming Language :: Python :: 3.5',
),
install_requires=[str(r.req) for r in install_reqs],
tests_require=[str(r.req) for r in test_reqs],
)
|
{
"content_hash": "20c7776963ba6d6610b7952d8e6fef92",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 30.02127659574468,
"alnum_prop": 0.6959603118355776,
"repo_name": "TuxEatPi/common",
"id": "4f21883b5284f3541f066c30395c60a090d365f7",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1976"
},
{
"name": "Python",
"bytes": "49983"
},
{
"name": "Shell",
"bytes": "1071"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import argparse
try:
import configparser
except:
import ConfigParser as configparser
from stir.script import StirTestCase
from magnolia.server import MinicapService
from magnolia.utility import *
from magnolia.utility import LOG as L
class TestCase_Unit(StirTestCase):
def __init__(self, *args, **kwargs):
super(TestCase_Unit, self).__init__(*args, **kwargs)
self.get_config(self.get("args.config"))
self.get_service()
self.service = MinicapService("minicap", self.adb.get().SERIAL,
self.adb.get().HEIGHT, self.adb.get().WIDTH,
self.adb.get().MINICAP_HEIGHT, self.adb.get().MINICAP_WIDTH, self.adb.get().ROTATE)
def __del__(self):
if self.service != None:
self.service.stop()
def arg_parse(self, parser):
super(TestCase_Unit, self).arg_parse(parser)
parser.add_argument(action='store', dest='testcase', help='TestCase Name.')
parser.add_argument("-s", "--serial", action='store', dest="serial", help="Android Serial.")
return parser
@classmethod
def get_service(cls):
L.info("call. print : %s" % str(cls))
if cls.get("args.package") != None:
prof = os.path.join(SCRIPT_DIR, cls.get("args.package"), "profile")
else:
prof = PROFILE_DIR
cls.adb = cls.service["stir.android"].get(cls.get("args.serial"), prof)
cls.minicap = cls.service["stir.minicap"].get(cls.get("minicap.ip"), int(cls.get("minicap.port")))
cls.pic = cls.service["stir.picture"].get()
if cls.get("args.slack") == None:
serial = cls.get("slack.serial")
else:
serial = cls.get("args.slack")
cls.slack = cls.service["stir.slack"].get(serial)
def get_config(cls, conf=None):
if cls.get("args.package") != None: host = os.path.join(SCRIPT_DIR, cls.get("args.package"))
else: host = SCRIPT_DIR
if conf == None: conf = os.path.join(host, "config.ini")
else: conf = os.path.join(host, "config", conf + ".ini")
try:
config = configparser.RawConfigParser()
cfp = open(conf, 'r')
config.readfp(cfp)
for section in config.sections():
for option in config.options(section):
cls.set("%s.%s" % (section, option), config.get(section, option))
except Exception as e:
L.warning('error: could not read config file: %s' % str(e))
|
{
"content_hash": "71114f5480a459189dbcefde131d5fda",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 106,
"avg_line_length": 38.196969696969695,
"alnum_prop": 0.5985719952399842,
"repo_name": "setsulla/stir",
"id": "a84c00bbe1f893b882b01c588add2989e6b3137f",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/magnolia/script/testcase_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176459"
}
],
"symlink_target": ""
}
|
import warlock
from glanceclient.common import utils
from glanceclient.v2 import schemas
MEMBER_STATUS_VALUES = ('accepted', 'rejected', 'pending')
class Controller(object):
def __init__(self, http_client, schema_client):
self.http_client = http_client
self.schema_client = schema_client
@utils.memoized_property
def model(self):
schema = self.schema_client.get('member')
return warlock.model_factory(schema.raw(), schemas.SchemaBasedModel)
def list(self, image_id):
url = '/v2/images/%s/members' % image_id
resp, body = self.http_client.get(url)
for member in body['members']:
yield self.model(member)
def delete(self, image_id, member_id):
self.http_client.delete('/v2/images/%s/members/%s' %
(image_id, member_id))
def update(self, image_id, member_id, member_status):
url = '/v2/images/%s/members/%s' % (image_id, member_id)
body = {'status': member_status}
resp, updated_member = self.http_client.put(url, data=body)
return self.model(updated_member)
def create(self, image_id, member_id):
url = '/v2/images/%s/members' % image_id
body = {'member': member_id}
resp, created_member = self.http_client.post(url, data=body)
return self.model(created_member)
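# Hedged usage sketch, not part of this module: the controller is normally
# reached through glanceclient's v2 Client as `client.image_members`. The
# endpoint, token and IDs below are placeholders.
def _image_members_example():
    from glanceclient import Client
    glance = Client('2', endpoint='http://glance.example.com:9292', token='<token>')
    image_id, project_id = '<image-uuid>', '<project-uuid>'
    glance.image_members.create(image_id, project_id)
    glance.image_members.update(image_id, project_id, 'accepted')
    for member in glance.image_members.list(image_id):
        print('%s %s' % (member.member_id, member.status))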
|
{
"content_hash": "b4deaefa497278bd2bc6a07aa74317c9",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 34.25,
"alnum_prop": 0.6240875912408759,
"repo_name": "mmasaki/python-glanceclient",
"id": "5d07b9b5e821ced6dc4460bfbce84a0eb5454495",
"size": "2006",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glanceclient/v2/image_members.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "576366"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
}
|
import socketio
import eventlet
import json
from flask import Flask
from datetime import datetime as date
from datetime import timedelta
import thread
# NOTE: Colour is assumed to live in the same local `colour` module as get_bus;
# the registration helper is aliased so the socket.io handler of the same name
# defined below does not shadow it.
from colour import get_bus, register_item as register_new_item, item_is_locked_in, Colour
class Item(object):
def __init__(self, name, colour):
self.name = name
self.colour = colour
class Alarm(object):
def __init__(self, item, start_time, end_time):
        self.item = item
        self.start_time = start_time
        self.end_time = end_time
def toJSON(self, o):
return json.dumps(self.__dict__)
class MessageEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Item):
return obj.__dict__
if isinstance(obj, Alarm):
return obj.__dict__
if isinstance(obj, Colour):
return obj.__dict__
return json.JSONEncoder.default(self, obj)
next_day = {
'monday': 'tuesday',
'tuesday': 'wednesday',
'wednesday': 'thursday',
'thursday': 'friday',
'friday': 'saturday',
'saturday': 'sunday',
'sunday': 'monday'
}
sio = socketio.Server()
connections = set()
alarms = {'id' : 0}
items = {"test": Item("test", Colour(123, 123, 123)), "test2" : Item("test2", Colour(55, 111, 222))}
schedule = {}
active_schedule = {}
bus = get_bus()
def emit_items():
sio.emit('items', json.dumps(list(items.values()), cls=MessageEncoder))
@sio.on('connect')
def connect(sid, data):
global connections
sio.emit('items', json.dumps(list(items.values()), cls=MessageEncoder), sid)
connections.add(sid)
@sio.on('disconnect')
def disconnect(sid):
global connections
connections.remove(sid)
@sio.on('register')
def register_item(sid, data):
registration_data = json.loads(data)
item_name = registration_data['item_name']
    colour = register_new_item(bus)
items[item_name] = Item(item_name, colour)
emit_items()
@sio.on('unregister')
def unregister_item(sid, data):
data = json.loads(data)
items.pop(data['item_name'], None)
emit_items()
@sio.on('get_items')
def get_items(sid, data):
sio.emit('itemList', json.dumps(list(items.values()), cls=MessageEncoder))
def is_compatible_schedule(candidate_timespans, schedule):
for candidate_timespan in candidate_timespans:
for day, timespan in candidate_timespan.items():
if day in schedule:
candidate_start_time, candidate_end_time, _ = timespan
for start_time, end_time, _ in schedule[day]:
if candidate_start_time < end_time and candidate_start_time > start_time:
return False
elif candidate_end_time > start_time and candidate_end_time < end_time:
return False
return True
def insert_timespan_into_schedule(timespan, schedule):
for day, time_period in timespan.items():
schedule[day] = schedule.get(day, [])
schedule[day].append(time_period)
def emit_schedules():
def format_schedules(schedules):
return [{'start_time': start_time, 'end_time': end_time, 'item_name': item_name} for start_time, end_time, item_name in schedules]
formatted_schedule = {day: format_schedules(schedule) for day, schedule in schedule.items()}
    sio.emit('schedules', json.dumps(formatted_schedule, cls=MessageEncoder))
def emit_error(error_message):
sio.emit('errors', json.dumps({"Message": error_message}, cls=MessageEncoder))
def format_timespan(timespan):
timespans = []
day = timespan.keys()[0]
start_time, duration, item_name = timespan[day]
if start_time + duration > 1440:
timespans.append({
day : (start_time, 1440, item_name)
})
timespans.append({
next_day[day] : (0, start_time + duration - 1440, item_name)
})
else:
timespans.append(timespan)
return timespans
def remove_from_schedule(timespan, schedule):
for day, time_period in timespan.items():
schedule[day].remove(time_period)
def parse_alarm_payload(data):
data = json.loads(data)
timespan = {day_name : (day['start_time'], day['duration'], day['item_name']) for day_name, day in data['days'].items()}
return format_timespan(timespan)
@sio.on('create_alarm')
def create_alarm(sid, data):
timespans = parse_alarm_payload(data)
if is_compatible_schedule(timespans, schedule):
for timespan in timespans:
insert_timespan_into_schedule(timespan, schedule)
emit_schedules()
else:
emit_error("The alarm you've inserted overlaps with another alarm already scheduled.")
@sio.on('activate_alarm')
def activate_alarm(sid, data):
timespans = parse_alarm_payload(data)
for timespan in timespans:
insert_timespan_into_schedule(timespan, active_schedule)
@sio.on('deactivate_alarm')
def deactivate_alarm(sid, data):
timespans = parse_alarm_payload(data)
for timespan in timespans:
remove_from_schedule(timespan, active_schedule)
@sio.on('remove_alarm')
def remove_alarm(sid, data):
timespans = parse_alarm_payload(data)
for timespan in timespans:
remove_from_schedule(timespan, schedule)
def check_alarms():
while True:
for day, timespans in active_schedule.items():
if date.today().strftime("%A").lower() == day:
current_time = date.now().time()
                minutes = current_time.hour * 60 + current_time.minute
for start_time, duration, item_name in timespans:
if item_is_locked_in(bus, items[item_name].colour, date.now() + timedelta(minutes = duration)):
sio.emit('stop_alarm', None)
elif start_time - minutes <= 4 and start_time - minutes > 0:
sio.emit('warning', {"item_name": item_name})
elif start_time - minutes < 0:
sio.emit('start_alarm', None)
def createProcessingThread():
    thread.start_new_thread(check_alarms, ())
def initialize_server():
app = Flask(__name__)
app = socketio.Middleware(sio, app)
createProcessingThread()
server = eventlet.listen(('0.0.0.0', 8083))
eventlet.wsgi.server(server, app)
def test_create_alarm():
data = '{"days": {"monday": {"start_time": 1023, "duration": 500, "item_name": "phone"}}}'
create_alarm(None, data)
data2 = '{"days": {"monday": {"start_time": 900, "duration": 300, "item_name": "laptop"}}}'
create_alarm(None, data2)
data3 = '{"days": {"tuesday": {"start_time": 2, "duration": 300, "item_name": "laptop"}}}'
create_alarm(None, data3)
assert(len(schedule) == 2)
def test_activate_alarm():
data = '{"days": {"monday": {"start_time": 1023, "duration": 500, "item_name": "phone"}}}'
activate_alarm(None, data)
check_alarms()
assert(len(active_schedule) == 2)
def test_deactivate_alarm():
data = '{"days": {"monday": {"start_time": 1023, "duration": 500, "item_name": "phone"}}}'
deactivate_alarm(None, data)
assert(len(active_schedule['monday']) == 0)
activate_alarm(None, data)
assert(len(active_schedule['monday']) == 1)
assert(len(active_schedule['tuesday']) == 1)
if __name__ == '__main__':
test_create_alarm()
test_activate_alarm()
test_deactivate_alarm()
initialize_server()
|
{
"content_hash": "880e25fcee837e29151fc20a1e9bc202",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 138,
"avg_line_length": 34.009389671361504,
"alnum_prop": 0.6283821093318609,
"repo_name": "kwang101/5olution",
"id": "5c5a2e82cdb32333c1f86d0e85363b1222a6132a",
"size": "7244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "147612"
},
{
"name": "HTML",
"bytes": "181279"
},
{
"name": "JavaScript",
"bytes": "519355"
},
{
"name": "Python",
"bytes": "10232"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import errno
import os
import select
import socket
import ssl
import gunicorn.http as http
import gunicorn.http.wsgi as wsgi
import gunicorn.util as util
import gunicorn.workers.base as base
from gunicorn import six
class SyncWorker(base.Worker):
def run(self):
# self.socket appears to lose its blocking status after
# we fork in the arbiter. Reset it here.
for s in self.sockets:
s.setblocking(0)
ready = self.sockets
while self.alive:
self.notify()
# Accept a connection. If we get an error telling us
# that no connection is waiting we fall down to the
# select which is where we'll wait for a bit for new
# workers to come give us some love.
for sock in ready:
try:
client, addr = sock.accept()
client.setblocking(1)
util.close_on_exec(client)
self.handle(sock, client, addr)
# Keep processing clients until no one is waiting. This
# prevents the need to select() for every client that we
# process.
continue
except socket.error as e:
if e.args[0] not in (errno.EAGAIN, errno.ECONNABORTED,
errno.EWOULDBLOCK):
raise
# If our parent changed then we shut down.
if self.ppid != os.getppid():
self.log.info("Parent changed, shutting down: %s", self)
return
try:
self.notify()
ret = select.select(self.sockets, [], self.PIPE, self.timeout)
if ret[0]:
ready = ret[0]
continue
except select.error as e:
if e.args[0] == errno.EINTR:
ready = self.sockets
continue
if e.args[0] == errno.EBADF:
if self.nr < 0:
ready = self.sockets
continue
else:
return
raise
def handle(self, listener, client, addr):
req = None
try:
if self.cfg.is_ssl:
client = ssl.wrap_socket(client, server_side=True,
do_handshake_on_connect=False,
**self.cfg.ssl_options)
parser = http.RequestParser(self.cfg, client)
req = six.next(parser)
self.handle_request(listener, req, client, addr)
except http.errors.NoMoreData as e:
self.log.debug("Ignored premature client disconnection. %s", e)
except StopIteration as e:
self.log.debug("Closing connection. %s", e)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_EOF:
self.log.debug("ssl connection closed")
client.close()
else:
self.log.debug("Error processing SSL request.")
self.handle_error(req, client, addr, e)
except socket.error as e:
if e.args[0] != errno.EPIPE:
self.log.exception("Error processing request.")
else:
self.log.debug("Ignoring EPIPE")
except Exception as e:
self.handle_error(req, client, addr, e)
finally:
util.close(client)
def handle_request(self, listener, req, client, addr):
environ = {}
resp = None
try:
self.cfg.pre_request(self, req)
request_start = datetime.now()
resp, environ = wsgi.create(req, client, addr,
listener.getsockname(), self.cfg)
# Force the connection closed until someone shows
# a buffering proxy that supports Keep-Alive to
# the backend.
resp.force_close()
self.nr += 1
if self.nr >= self.max_requests:
self.log.info("Autorestarting worker after current request.")
self.alive = False
respiter = self.wsgi(environ, resp.start_response)
try:
if isinstance(respiter, environ['wsgi.file_wrapper']):
resp.write_file(respiter)
else:
for item in respiter:
resp.write(item)
resp.close()
request_time = datetime.now() - request_start
self.log.access(resp, req, environ, request_time)
finally:
if hasattr(respiter, "close"):
respiter.close()
except socket.error:
raise
except Exception as e:
            if resp and resp.headers_sent:
# If the requests have already been sent, we should close the
# connection to indicate the error.
try:
client.shutdown(socket.SHUT_RDWR)
client.close()
except socket.error:
pass
return
# Only send back traceback in HTTP in debug mode.
self.handle_error(req, client, addr, e)
return
finally:
try:
self.cfg.post_request(self, req, environ, resp)
except Exception:
self.log.exception("Exception in post_request hook")
|
{
"content_hash": "dd2481438256eb02238519b638fd5ac1",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 78,
"avg_line_length": 36.30065359477124,
"alnum_prop": 0.5061217140799423,
"repo_name": "mpetyx/palmdrop",
"id": "baf45a90e96bc894003185e35f9cb8694ea8f8e5",
"size": "5684",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/gunicorn/workers/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134345"
},
{
"name": "JavaScript",
"bytes": "748285"
},
{
"name": "PHP",
"bytes": "2156"
},
{
"name": "Python",
"bytes": "9560842"
},
{
"name": "Shell",
"bytes": "3695"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import numpy as np
import random
def L1_dist(x, y):
"""
x : one long sample vector
y : array of cluster bases
"""
return np.sum(abs(x-y), axis=1)
def KMeans(N, X, tol, max_iter, metric, centers):
"""
N : number of clusters
X : samples in numpy array, sample_num * feat_num
tol : cost < tol, while loop will break
max_iter : maximun number of iterations kmeans will run
metric : distance metric, support L1-distance here
centers : use determined init cluster centers
"""
if metric == "l1":
        sim = L1_dist
sample_num = X.shape[0]
feat_dim = X.shape[1]
    #init N random cluster centers
if len(centers) == 0:
randint = random.sample(range(sample_num), N)
centers = X[randint,:]
iter = 0
while True:
#compute cluster assignments
cluster_assign = np.array([sim(x, centers).argsort()[0] for x in X])
#re-compute the cluster centers
for n in xrange(N):
centers[n] = np.mean(X[n==cluster_assign], axis=0)
        #compute the cost function
A = np.zeros(X.shape)
for n in xrange(sample_num):
A[n] = centers[cluster_assign[n]]
cost = np.mean(np.sqrt(np.sum((X-A)**2,axis=1)))
iter = iter + 1
if (cost < tol or iter > max_iter):
break
return cluster_assign
if __name__ == "__main__":
X = np.loadtxt("feat.txt")
N = 3
centers = X[[0,3,6],:]
    #if you want to randomly init centers, set centers = []
clusters = KMeans(N=N, X=X, tol=0.1, max_iter=20, metric="l1", centers=centers)
for n in xrange(N):
print "cluster %d:"%(n+1),np.where(n==clusters)[0]+1
|
{
"content_hash": "c88392daa29d0fd8ea330392adab1c7f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 87,
"avg_line_length": 25.25,
"alnum_prop": 0.6455445544554456,
"repo_name": "zhouyang2640/MLCourse",
"id": "82aa119b18fcf702d81013ff3c5c6d4fe8df947d",
"size": "1549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KMeans.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from django.db.models.fields import FloatField
from .base import CompositeField
class ComplexField(CompositeField):
real = FloatField()
imag = FloatField()
def __init__(self, verbose_name=None, blank=False, null=False, default=None):
super(ComplexField, self).__init__()
self.verbose_name = verbose_name
for field in (self['real'], self['imag']):
field.blank = blank
            field.null = null
if default is not None:
self['real'].default = default.real
self['imag'].default = default.imag
def contribute_to_class(self, cls, field_name):
if self.verbose_name is None:
self.verbose_name = field_name.replace('_', ' ')
self['real'].verbose_name = 'Re(%s)' % self.verbose_name
self['imag'].verbose_name = 'Im(%s)' % self.verbose_name
super(ComplexField, self).contribute_to_class(cls, field_name)
def get(self, model):
proxy = self.get_proxy(model)
real, imag = proxy.real, proxy.imag
if real is None and imag is None:
return None
return complex(real or 0, imag or 0)
def set(self, model, value):
proxy = self.get_proxy(model)
if value is None:
proxy.real = None
proxy.imag = None
else:
proxy.real = value.real
proxy.imag = value.imag
|
{
"content_hash": "64e583158e08f13d7ae4438c6af024d6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 33.23809523809524,
"alnum_prop": 0.586676217765043,
"repo_name": "georgemarshall/django-composite-field",
"id": "ea02261b119d807e778fb63d494664909f802d51",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "composite_field/complex.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "27868"
}
],
"symlink_target": ""
}
|
"""
Handles all request to the Platform or Guest VM
"""
from eventlet import Timeout
from oslo.messaging.rpc.client import RemoteError
from oslo import messaging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
import trove.common.rpc.version as rpc_version
from trove.openstack.common import log as logging
from trove import rpc
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
AGENT_LOW_TIMEOUT = CONF.agent_call_low_timeout
AGENT_HIGH_TIMEOUT = CONF.agent_call_high_timeout
AGENT_SNAPSHOT_TIMEOUT = CONF.agent_replication_snapshot_timeout
class API(object):
"""API for interacting with the guest manager."""
def __init__(self, context, id):
self.context = context
self.id = id
super(API, self).__init__()
target = messaging.Target(topic=self._get_routing_key(),
version=rpc_version.RPC_API_VERSION)
self.version_cap = rpc_version.VERSION_ALIASES.get(
CONF.upgrade_levels.guestagent)
self.client = self.get_client(target, self.version_cap)
def get_client(self, target, version_cap, serializer=None):
return rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
def _call(self, method_name, timeout_sec, version, **kwargs):
LOG.debug("Calling %s with timeout %s" % (method_name, timeout_sec))
try:
cctxt = self.client.prepare(version=version, timeout=timeout_sec)
result = cctxt.call(self.context, method_name, **kwargs)
LOG.debug("Result is %s." % result)
return result
except RemoteError as r:
LOG.exception(_("Error calling %s") % method_name)
raise exception.GuestError(original_message=r.value)
except Exception as e:
LOG.exception(_("Error calling %s") % method_name)
raise exception.GuestError(original_message=str(e))
except Timeout:
raise exception.GuestTimeout()
def _cast(self, method_name, version, **kwargs):
LOG.debug("Casting %s" % method_name)
try:
cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, method_name, **kwargs)
except RemoteError as r:
LOG.exception(_("Error calling %s") % method_name)
raise exception.GuestError(original_message=r.value)
except Exception as e:
LOG.exception(_("Error calling %s") % method_name)
raise exception.GuestError(original_message=str(e))
def _get_routing_key(self):
"""Create the routing key based on the container id."""
return "guestagent.%s" % self.id
def change_passwords(self, users):
"""Make an asynchronous call to change the passwords of one or more
users.
"""
LOG.debug("Changing passwords for users on instance %s.", self.id)
self._cast("change_passwords", self.version_cap, users=users)
def update_attributes(self, username, hostname, user_attrs):
"""Update user attributes."""
LOG.debug("Changing user attributes on instance %s.", self.id)
self._cast("update_attributes", self.version_cap, username=username,
hostname=hostname, user_attrs=user_attrs)
def create_user(self, users):
"""Make an asynchronous call to create a new database user"""
LOG.debug("Creating Users for instance %s.", self.id)
self._cast("create_user", self.version_cap, users=users)
def get_user(self, username, hostname):
"""Make an asynchronous call to get a single database user."""
LOG.debug("Getting a user %(username)s on instance %(id)s.",
{'username': username, 'id': self.id})
return self._call("get_user", AGENT_LOW_TIMEOUT, self.version_cap,
username=username, hostname=hostname)
def list_access(self, username, hostname):
"""Show all the databases to which a user has more than USAGE."""
LOG.debug("Showing user %(username)s grants on instance %(id)s.",
{'username': username, 'id': self.id})
return self._call("list_access", AGENT_LOW_TIMEOUT, self.version_cap,
username=username, hostname=hostname)
def grant_access(self, username, hostname, databases, permissions):
"""Grant a user permission to use a given database."""
LOG.debug("Granting access to databases %(databases)s for user "
"%(username)s on instance %(id)s.", {'username': username,
'databases': databases,
'id': self.id})
return self._call("grant_access", AGENT_LOW_TIMEOUT, self.version_cap,
username=username, hostname=hostname,
databases=databases, permissions=permissions)
def revoke_access(self, username, hostname, database):
"""Remove a user's permission to use a given database."""
LOG.debug("Revoking access from database %(database)s for user "
"%(username)s on instance %(id)s.", {'username': username,
'database': database,
'id': self.id})
return self._call("revoke_access", AGENT_LOW_TIMEOUT, self.version_cap,
username=username, hostname=hostname,
database=database)
def list_users(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list database users."""
LOG.debug("Listing Users for instance %s.", self.id)
return self._call("list_users", AGENT_HIGH_TIMEOUT, self.version_cap,
limit=limit, marker=marker,
include_marker=include_marker)
def delete_user(self, user):
"""Make an asynchronous call to delete an existing database user."""
LOG.debug("Deleting user %(user)s for instance %(instance_id)s." %
{'user': user, 'instance_id': self.id})
self._cast("delete_user", self.version_cap, user=user)
def create_database(self, databases):
"""Make an asynchronous call to create a new database
within the specified container
"""
LOG.debug("Creating databases for instance %s.", self.id)
self._cast("create_database", self.version_cap, databases=databases)
def list_databases(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list databases."""
LOG.debug("Listing databases for instance %s.", self.id)
return self._call("list_databases", AGENT_LOW_TIMEOUT,
self.version_cap, limit=limit, marker=marker,
include_marker=include_marker)
def delete_database(self, database):
"""Make an asynchronous call to delete an existing database
within the specified container
"""
LOG.debug("Deleting database %(database)s for "
"instance %(instance_id)s." % {'database': database,
'instance_id': self.id})
self._cast("delete_database", self.version_cap, database=database)
def enable_root(self):
"""Make a synchronous call to enable the root user for
access from anywhere
"""
LOG.debug("Enable root user for instance %s.", self.id)
return self._call("enable_root", AGENT_HIGH_TIMEOUT, self.version_cap)
def disable_root(self):
"""Make a synchronous call to disable the root user for
access from anywhere
"""
LOG.debug("Disable root user for instance %s.", self.id)
return self._call("disable_root", AGENT_LOW_TIMEOUT, self.version_cap)
def is_root_enabled(self):
"""Make a synchronous call to check if root access is
available for the container
"""
LOG.debug("Check root access for instance %s.", self.id)
return self._call("is_root_enabled", AGENT_LOW_TIMEOUT,
self.version_cap)
def get_hwinfo(self):
"""Make a synchronous call to get hardware info for the container"""
LOG.debug("Check hwinfo on instance %s.", self.id)
return self._call("get_hwinfo", AGENT_LOW_TIMEOUT, self.version_cap)
def get_diagnostics(self):
"""Make a synchronous call to get diagnostics for the container"""
LOG.debug("Check diagnostics on instance %s.", self.id)
return self._call("get_diagnostics", AGENT_LOW_TIMEOUT,
self.version_cap)
def rpc_ping(self):
"""Make a synchronous RPC call to check if we can ping the instance."""
LOG.debug("Check RPC ping on instance %s.", self.id)
return self._call("rpc_ping", AGENT_LOW_TIMEOUT, self.version_cap)
def prepare(self, memory_mb, packages, databases, users,
device_path='/dev/vdb', mount_point='/mnt/volume',
backup_info=None, config_contents=None, root_password=None,
overrides=None, cluster_config=None, snapshot=None):
"""Make an asynchronous call to prepare the guest
as a database container optionally includes a backup id for restores
"""
LOG.debug("Sending the call to prepare the Guest.")
# Taskmanager is a publisher, guestagent is a consumer. Usually
# consumer creates a queue, but in this case we have to make sure
# "prepare" doesn't get lost if for some reason guest was delayed and
# didn't create a queue on time.
self._create_guest_queue()
packages = packages.split()
self._cast(
"prepare", self.version_cap, packages=packages,
databases=databases, memory_mb=memory_mb, users=users,
device_path=device_path, mount_point=mount_point,
backup_info=backup_info, config_contents=config_contents,
root_password=root_password, overrides=overrides,
cluster_config=cluster_config, snapshot=snapshot)
def _create_guest_queue(self):
"""Call to construct, start and immediately stop rpc server in order
to create a queue to communicate with the guestagent. This is
method do nothing in case a queue is already created by
the guest
"""
server = None
target = messaging.Target(topic=self._get_routing_key(),
server=self.id,
version=rpc_version.RPC_API_VERSION)
try:
server = rpc.get_server(target, [])
server.start()
finally:
if server is not None:
server.stop()
def restart(self):
"""Restart the MySQL server."""
LOG.debug("Sending the call to restart MySQL on the Guest.")
self._call("restart", AGENT_HIGH_TIMEOUT, self.version_cap)
def start_db_with_conf_changes(self, config_contents):
"""Start the MySQL server."""
LOG.debug("Sending the call to start MySQL on the Guest with "
"a timeout of %s." % AGENT_HIGH_TIMEOUT)
self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT,
self.version_cap, config_contents=config_contents)
def reset_configuration(self, configuration):
"""Ignore running state of MySQL, and just change the config file
to a new flavor.
"""
LOG.debug("Sending the call to change MySQL conf file on the Guest "
"with a timeout of %s." % AGENT_HIGH_TIMEOUT)
self._call("reset_configuration", AGENT_HIGH_TIMEOUT,
self.version_cap, configuration=configuration)
def stop_db(self, do_not_start_on_reboot=False):
"""Stop the MySQL server."""
LOG.debug("Sending the call to stop MySQL on the Guest.")
self._call("stop_db", AGENT_HIGH_TIMEOUT, self.version_cap,
do_not_start_on_reboot=do_not_start_on_reboot)
def upgrade(self, instance_version, location, metadata=None):
"""Make an asynchronous call to self upgrade the guest agent."""
LOG.debug("Sending an upgrade call to nova-guest.")
self._cast("upgrade", self.version_cap,
instance_version=instance_version,
location=location,
metadata=metadata)
def get_volume_info(self):
"""Make a synchronous call to get volume info for the container."""
LOG.debug("Check Volume Info on instance %s.", self.id)
return self._call("get_filesystem_stats", AGENT_LOW_TIMEOUT,
self.version_cap, fs_path=None)
def update_guest(self):
"""Make a synchronous call to update the guest agent."""
LOG.debug("Updating guest agent on instance %s.", self.id)
self._call("update_guest", AGENT_HIGH_TIMEOUT, self.version_cap)
def create_backup(self, backup_info):
"""Make async call to create a full backup of this instance."""
LOG.debug("Create Backup %(backup_id)s "
"for instance %(instance_id)s." %
{'backup_id': backup_info['id'], 'instance_id': self.id})
self._cast("create_backup", self.version_cap, backup_info=backup_info)
def mount_volume(self, device_path=None, mount_point=None):
"""Mount the volume."""
LOG.debug("Mount volume %(mount)s on instance %(id)s." % {
'mount': mount_point, 'id': self.id})
self._call("mount_volume", AGENT_LOW_TIMEOUT, self.version_cap,
device_path=device_path, mount_point=mount_point)
def unmount_volume(self, device_path=None, mount_point=None):
"""Unmount the volume."""
LOG.debug("Unmount volume %(device)s on instance %(id)s." % {
'device': device_path, 'id': self.id})
self._call("unmount_volume", AGENT_LOW_TIMEOUT, self.version_cap,
device_path=device_path, mount_point=mount_point)
def resize_fs(self, device_path=None, mount_point=None):
"""Resize the filesystem."""
LOG.debug("Resize device %(device)s on instance %(id)s." % {
'device': device_path, 'id': self.id})
self._call("resize_fs", AGENT_HIGH_TIMEOUT, self.version_cap,
device_path=device_path, mount_point=mount_point)
def update_overrides(self, overrides, remove=False):
"""Update the overrides."""
LOG.debug("Updating overrides values %(overrides)s on instance "
"%(id)s.", {'overrides': overrides, 'id': self.id})
self._cast("update_overrides", self.version_cap, overrides=overrides,
remove=remove)
def apply_overrides(self, overrides):
LOG.debug("Applying overrides values %(overrides)s on instance "
"%(id)s.", {'overrides': overrides, 'id': self.id})
self._cast("apply_overrides", self.version_cap, overrides=overrides)
def get_replication_snapshot(self, snapshot_info=None,
replica_source_config=None):
LOG.debug("Retrieving replication snapshot from instance %s.", self.id)
return self._call("get_replication_snapshot", AGENT_SNAPSHOT_TIMEOUT,
self.version_cap, snapshot_info=snapshot_info,
replica_source_config=replica_source_config)
def attach_replication_slave(self, snapshot, replica_config=None):
LOG.debug("Configuring instance %s to replicate from %s.",
self.id, snapshot.get('master').get('id'))
self._cast("attach_replication_slave", self.version_cap,
snapshot=snapshot, slave_config=replica_config)
def detach_replica(self, for_failover=False):
LOG.debug("Detaching replica %s from its replication source.", self.id)
return self._call("detach_replica", AGENT_HIGH_TIMEOUT,
self.version_cap, for_failover=for_failover)
def get_replica_context(self):
LOG.debug("Getting replica context.")
return self._call("get_replica_context",
AGENT_HIGH_TIMEOUT, self.version_cap)
def attach_replica(self, replica_info, slave_config):
LOG.debug("Attaching replica %s." % replica_info)
self._call("attach_replica", AGENT_HIGH_TIMEOUT, self.version_cap,
replica_info=replica_info, slave_config=slave_config)
def make_read_only(self, read_only):
LOG.debug("Executing make_read_only(%s)" % read_only)
self._call("make_read_only", AGENT_HIGH_TIMEOUT, self.version_cap,
read_only=read_only)
def enable_as_master(self, replica_source_config):
LOG.debug("Executing enable_as_master")
self._call("enable_as_master", AGENT_HIGH_TIMEOUT, self.version_cap,
replica_source_config=replica_source_config)
# DEPRECATED: Maintain for API Compatibility
def get_txn_count(self):
LOG.debug("Executing get_txn_count.")
return self._call("get_txn_count",
AGENT_HIGH_TIMEOUT, self.version_cap)
def get_last_txn(self):
LOG.debug("Executing get_last_txn.")
return self._call("get_last_txn",
AGENT_HIGH_TIMEOUT, self.version_cap)
def get_latest_txn_id(self):
LOG.debug("Executing get_latest_txn_id.")
return self._call("get_latest_txn_id",
AGENT_HIGH_TIMEOUT, self.version_cap)
def wait_for_txn(self, txn):
LOG.debug("Executing wait_for_txn.")
self._call("wait_for_txn", AGENT_HIGH_TIMEOUT, self.version_cap,
txn=txn)
def cleanup_source_on_replica_detach(self, replica_info):
LOG.debug("Cleaning up master %s on detach of replica.", self.id)
self._call("cleanup_source_on_replica_detach", AGENT_HIGH_TIMEOUT,
self.version_cap, replica_info=replica_info)
def demote_replication_master(self):
LOG.debug("Demoting instance %s to non-master.", self.id)
self._call("demote_replication_master", AGENT_HIGH_TIMEOUT,
self.version_cap)
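# Hedged illustration, not part of this module: trove's task manager and
# instance code construct this API with a request context plus the guest's
# instance id and then issue the casts/calls defined above. `context` and
# `instance_id` here are placeholders.
def _guestagent_api_example(context, instance_id):
    guest = API(context, instance_id)
    if not guest.is_root_enabled():
        guest.enable_root()
    guest.restart()
    return guest.get_volume_info()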
|
{
"content_hash": "15e2c3529809d8ca895c8fcbe99f6da3",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 79,
"avg_line_length": 46.67424242424242,
"alnum_prop": 0.6044473299788995,
"repo_name": "CMSS-BCRDB/RDS",
"id": "f42c8c1e028e6539462987e234ac35eab91068a8",
"size": "19123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/guestagent/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60526"
},
{
"name": "Python",
"bytes": "2811396"
},
{
"name": "Shell",
"bytes": "4771"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
'''Base58 encoding
Implementations of Base58 and Base58Check encodings that are compatible
with the bitcoin network.
'''
# This module is based upon base58 snippets found scattered over many bitcoin
# tools written in python. From what I gather the original source is from a
# forum post by Gavin Andresen, so direct your praise to him.
# This module adds shiny packaging and support for python3.
from functools import lru_cache
from hashlib import sha256
from typing import Mapping, Union
__version__ = '2.1.1'
# 58 character alphabet used
BITCOIN_ALPHABET = \
b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
RIPPLE_ALPHABET = b'rpshnaf39wBUDNEGHJKLM4PQRST7VWXYZ2bcdeCg65jkm8oFqi1tuvAxyz'
XRP_ALPHABET = RIPPLE_ALPHABET
# Retro compatibility
alphabet = BITCOIN_ALPHABET
def scrub_input(v: Union[str, bytes]) -> bytes:
if isinstance(v, str):
v = v.encode('ascii')
return v
def b58encode_int(
i: int, default_one: bool = True, alphabet: bytes = BITCOIN_ALPHABET
) -> bytes:
"""
Encode an integer using Base58
"""
if not i and default_one:
return alphabet[0:1]
string = b""
base = len(alphabet)
while i:
i, idx = divmod(i, base)
string = alphabet[idx:idx+1] + string
return string
def b58encode(
v: Union[str, bytes], alphabet: bytes = BITCOIN_ALPHABET
) -> bytes:
"""
Encode a string using Base58
"""
v = scrub_input(v)
origlen = len(v)
v = v.lstrip(b'\0')
newlen = len(v)
acc = int.from_bytes(v, byteorder='big') # first byte is most significant
result = b58encode_int(acc, default_one=False, alphabet=alphabet)
return alphabet[0:1] * (origlen - newlen) + result
@lru_cache()
def _get_base58_decode_map(alphabet: bytes,
autofix: bool) -> Mapping[int, int]:
invmap = {char: index for index, char in enumerate(alphabet)}
if autofix:
groups = [b'0Oo', b'Il1']
for group in groups:
pivots = [c for c in group if c in invmap]
if len(pivots) == 1:
for alternative in group:
invmap[alternative] = invmap[pivots[0]]
return invmap
def b58decode_int(
v: Union[str, bytes], alphabet: bytes = BITCOIN_ALPHABET, *,
autofix: bool = False
) -> int:
"""
Decode a Base58 encoded string as an integer
"""
if b' ' not in alphabet:
v = v.rstrip()
v = scrub_input(v)
map = _get_base58_decode_map(alphabet, autofix=autofix)
decimal = 0
base = len(alphabet)
try:
for char in v:
decimal = decimal * base + map[char]
except KeyError as e:
raise ValueError(
"Invalid character {!r}".format(chr(e.args[0]))
) from None
return decimal
def b58decode(
v: Union[str, bytes], alphabet: bytes = BITCOIN_ALPHABET, *,
autofix: bool = False
) -> bytes:
"""
Decode a Base58 encoded string
"""
v = v.rstrip()
v = scrub_input(v)
origlen = len(v)
v = v.lstrip(alphabet[0:1])
newlen = len(v)
acc = b58decode_int(v, alphabet=alphabet, autofix=autofix)
result = []
while acc > 0:
acc, mod = divmod(acc, 256)
result.append(mod)
return b'\0' * (origlen - newlen) + bytes(reversed(result))
def b58encode_check(
v: Union[str, bytes], alphabet: bytes = BITCOIN_ALPHABET
) -> bytes:
"""
Encode a string using Base58 with a 4 character checksum
"""
v = scrub_input(v)
digest = sha256(sha256(v).digest()).digest()
return b58encode(v + digest[:4], alphabet=alphabet)
def b58decode_check(
v: Union[str, bytes], alphabet: bytes = BITCOIN_ALPHABET, *,
autofix: bool = False
) -> bytes:
'''Decode and verify the checksum of a Base58 encoded string'''
result = b58decode(v, alphabet=alphabet, autofix=autofix)
result, check = result[:-4], result[-4:]
digest = sha256(sha256(result).digest()).digest()
if check != digest[:4]:
raise ValueError("Invalid checksum")
return result
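if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative): checksummed and plain
    # encodings should both decode back to the original bytes.
    payload = b'hello world'
    assert b58decode_check(b58encode_check(payload)) == payload
    assert b58decode(b58encode(payload)) == payload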
|
{
"content_hash": "edd0185f84a242dc8d1da5825fa4f949",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 79,
"avg_line_length": 25.528301886792452,
"alnum_prop": 0.6304508499630451,
"repo_name": "keis/base58",
"id": "929014f2eb62b8f2184db098a6d7ec11ea832b31",
"size": "4059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base58/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12716"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1
async def sample_get_model_evaluation_slice():
# Create a client
client = aiplatform_v1.ModelServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationSliceRequest(
name="name_value",
)
# Make the request
response = await client.get_model_evaluation_slice(request=request)
# Handle the response
print(response)
# [END aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async]
|
{
"content_hash": "3c0309431a9888bf2b51b927eb2e96e1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 27,
"alnum_prop": 0.732943469785575,
"repo_name": "googleapis/python-aiplatform",
"id": "a7b8371a97b5def962e923e4d62ca23acbd4da44",
"size": "1924",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
from Crypto.Util.number import long_to_bytes, bytes_to_long
import sys
from Crypto.Util.py3compat import *
__all__ = [ 'DerObject', 'DerInteger', 'DerOctetString', 'DerNull', 'DerSequence', 'DerObjectId' ]
class DerObject:
"""Base class for defining a single DER object.
Instantiate this class ONLY when you have to decode a DER element.
"""
# Known TAG types
typeTags = { 'SEQUENCE': 0x30, 'BIT STRING': 0x03, 'INTEGER': 0x02,
'OCTET STRING': 0x04, 'NULL': 0x05, 'OBJECT IDENTIFIER': 0x06 }
def __init__(self, ASN1Type=None, payload=b('')):
"""Initialize the DER object according to a specific type.
The ASN.1 type is either specified as the ASN.1 string (e.g.
'SEQUENCE'), directly with its numerical tag or with no tag
at all (None)."""
if isInt(ASN1Type) or ASN1Type is None:
self.typeTag = ASN1Type
else:
if len(ASN1Type)==1:
self.typeTag = ord(ASN1Type)
else:
self.typeTag = self.typeTags.get(ASN1Type)
self.payload = payload
def isType(self, ASN1Type):
return self.typeTags[ASN1Type]==self.typeTag
def _lengthOctets(self, payloadLen):
"""Return a byte string that encodes the given payload length (in
bytes) in a format suitable for a DER length tag (L).
"""
if payloadLen>127:
encoding = long_to_bytes(payloadLen)
return bchr(len(encoding)+128) + encoding
return bchr(payloadLen)
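    # For example (illustrative): a 5-byte payload is encoded as the single
    # length octet b'\x05', while a 300-byte payload uses the long form
    # b'\x82\x01\x2c' (0x82 = "two length octets follow", 0x012c = 300).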
def encode(self):
"""Return a complete DER element, fully encoded as a TLV."""
return bchr(self.typeTag) + self._lengthOctets(len(self.payload)) + self.payload
def _decodeLen(self, idx, der):
"""Given a (part of a) DER element, and an index to the first byte of
a DER length tag (L), return a tuple with the payload size,
        and the index of the first byte of such payload (V).
Raises a ValueError exception if the DER length is invalid.
Raises an IndexError exception if the DER element is too short.
"""
length = bord(der[idx])
if length<=127:
return (length,idx+1)
payloadLength = bytes_to_long(der[idx+1:idx+1+(length & 0x7F)])
if payloadLength<=127:
raise ValueError("Not a DER length tag.")
return (payloadLength, idx+1+(length & 0x7F))
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete DER element, and re-initializes this
object with it.
@param derEle A complete DER element. It must start with a DER T
tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception in case of parsing errors.
Raises an IndexError exception if the DER element is too short.
"""
try:
self.typeTag = bord(derEle[0])
if (self.typeTag & 0x1F)==0x1F:
raise ValueError("Unsupported DER tag")
(length,idx) = self._decodeLen(1, derEle)
if noLeftOvers and len(derEle) != (idx+length):
raise ValueError("Not a DER structure")
self.payload = derEle[idx:idx+length]
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return idx+length
class DerInteger(DerObject):
def __init__(self, value = 0):
"""Class to model an INTEGER DER element.
Limitation: only non-negative values are supported.
"""
DerObject.__init__(self, 'INTEGER')
self.value = value
def encode(self):
"""Return a complete INTEGER DER element, fully encoded as a TLV."""
self.payload = long_to_bytes(self.value)
if bord(self.payload[0])>127:
self.payload = bchr(0x00) + self.payload
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete INTEGER DER element, and re-initializes this
object with it.
@param derEle A complete INTEGER DER element. It must start with a DER
INTEGER tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
Raises a ValueError exception if the DER element is not a
valid non-negative INTEGER.
Raises an IndexError exception if the DER element is too short.
"""
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['INTEGER']:
raise ValueError ("Not a DER INTEGER.")
if bord(self.payload[0])>127:
raise ValueError ("Negative INTEGER.")
self.value = bytes_to_long(self.payload)
return tlvLength
class DerSequence(DerObject):
"""Class to model a SEQUENCE DER element.
    This object behaves like a dynamic Python sequence.
    Sub-elements that are INTEGERs look like Python integers.
Any other sub-element is a binary string encoded as the complete DER
sub-element (TLV).
"""
def __init__(self, startSeq=None):
"""Initialize the SEQUENCE DER object. Always empty
initially."""
DerObject.__init__(self, 'SEQUENCE')
if startSeq==None:
self._seq = []
else:
self._seq = startSeq
## A few methods to make it behave like a python sequence
def __delitem__(self, n):
del self._seq[n]
def __getitem__(self, n):
return self._seq[n]
def __setitem__(self, key, value):
self._seq[key] = value
def __setslice__(self,i,j,sequence):
self._seq[i:j] = sequence
def __delslice__(self,i,j):
del self._seq[i:j]
def __getslice__(self, i, j):
return self._seq[max(0, i):max(0, j)]
def __len__(self):
return len(self._seq)
def append(self, item):
return self._seq.append(item)
def hasInts(self):
"""Return the number of items in this sequence that are numbers."""
return len(filter(isInt, self._seq))
def hasOnlyInts(self):
"""Return True if all items in this sequence are numbers."""
return self._seq and self.hasInts()==len(self._seq)
def encode(self):
"""Return the DER encoding for the ASN.1 SEQUENCE, containing
the non-negative integers and longs added to this object.
        Limitation: Raises a ValueError exception if some elements
in the sequence are neither Python integers nor complete DER INTEGERs.
"""
self.payload = b('')
for item in self._seq:
try:
self.payload += item
except:
try:
self.payload += DerInteger(item).encode()
except:
raise ValueError("Trying to DER encode an unknown object")
return DerObject.encode(self)
def decode(self, derEle, noLeftOvers=0):
"""Decode a complete SEQUENCE DER element, and re-initializes this
object with it.
@param derEle A complete SEQUENCE DER element. It must start with a DER
SEQUENCE tag.
@param noLeftOvers Indicate whether it is acceptable to complete the
parsing of the DER element and find that not all
bytes in derEle have been used.
@return Index of the first unused byte in the given DER element.
DER INTEGERs are decoded into Python integers. Any other DER
element is not decoded. Its validity is not checked.
Raises a ValueError exception if the DER element is not a
valid DER SEQUENCE.
Raises an IndexError exception if the DER element is too short.
"""
self._seq = []
try:
tlvLength = DerObject.decode(self, derEle, noLeftOvers)
if self.typeTag!=self.typeTags['SEQUENCE']:
raise ValueError("Not a DER SEQUENCE.")
# Scan one TLV at once
idx = 0
while idx<len(self.payload):
typeTag = bord(self.payload[idx])
if typeTag==self.typeTags['INTEGER']:
newInteger = DerInteger()
idx += newInteger.decode(self.payload[idx:])
self._seq.append(newInteger.value)
else:
itemLen,itemIdx = self._decodeLen(idx+1,self.payload)
self._seq.append(self.payload[idx:itemIdx+itemLen])
idx = itemIdx + itemLen
except IndexError:
raise ValueError("Not a valid DER SEQUENCE.")
return tlvLength
class DerOctetString(DerObject):
def __init__(self, value = b('')):
DerObject.__init__(self, 'OCTET STRING')
self.payload = value
def decode(self, derEle, noLeftOvers=0):
p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OCTET STRING"):
raise ValueError("Not a valid OCTET STRING.")
return p
class DerNull(DerObject):
def __init__(self):
DerObject.__init__(self, 'NULL')
class DerObjectId(DerObject):
def __init__(self):
DerObject.__init__(self, 'OBJECT IDENTIFIER')
def decode(self, derEle, noLeftOvers=0):
p = DerObject.decode(self, derEle, noLeftOvers)
if not self.isType("OBJECT IDENTIFIER"):
raise ValueError("Not a valid OBJECT IDENTIFIER.")
return p
def isInt(x):
test = 0
try:
test += x
except TypeError:
return 0
return 1
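if __name__ == '__main__':
    # Minimal round-trip sketch (illustrative): encode a SEQUENCE of two
    # INTEGERs and check that decoding recovers the same values.
    _seq = DerSequence()
    _seq.append(1)
    _seq.append(65537)
    _blob = _seq.encode()
    _parsed = DerSequence()
    _parsed.decode(_blob)
    assert _parsed[0] == 1 and _parsed[1] == 65537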
|
{
"content_hash": "0080a6322ec37d03a7b251532f2f9965",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 98,
"avg_line_length": 44.26136363636363,
"alnum_prop": 0.5091998288403937,
"repo_name": "nmercier/linux-cross-gcc",
"id": "37957ce95293e27e7700b0058f2d5394f17e06ae",
"size": "12739",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linux/lib/python2.7/dist-packages/Crypto/Util/asn1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1047092"
},
{
"name": "C++",
"bytes": "151335"
},
{
"name": "Makefile",
"bytes": "82796"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "29123266"
},
{
"name": "Shell",
"bytes": "14668"
}
],
"symlink_target": ""
}
|
from bokeh.models import Div, Select, WidgetBox
from bokehselectize.selectizeselect import SelectizeSelect
from bokeh.io import show, curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Button
import random
DATA = {
'A': dict(email=['foo@bar.com', 'another@email.com', 'third@email.com',
'baz@foo.com', 'last.one@here.org'],
first_name=['Foo', 'Ann', 'Richard', 'Baz', 'One'],
last_name=['Bar', 'Other', 'Third', 'Foo', 'Last']),
'B': dict(email=['B@example.com', 'C@example.com'],
first_name=['B', 'C'],
last_name=['Boo', 'Coo'])
}
SELECTED_DATA = 'A'
options = ColumnDataSource(data=DATA[SELECTED_DATA])
def swap_options():
global SELECTED_DATA
SELECTED_DATA = 'B' if SELECTED_DATA == 'A' else 'A'
options.data = DATA[SELECTED_DATA]
button = Button(label="Swap options")
button.on_click(swap_options)
RENDER_OPTION_TEMPLATE = "<div><div><strong>{first_name} {last_name}</strong></div><div>{email}</div></div>"
RENDER_ITEM_TEMPLATE = "<div>{first_name} {last_name}<{email}></div>"
selectize_select = SelectizeSelect(title='Select a few emails',
placeholder='Go ahead..',
options=options,
label_field='email',
value_field='email',
max_items=None,
render_option_template=RENDER_OPTION_TEMPLATE,
render_item_template=RENDER_ITEM_TEMPLATE,
height=300,
width=200,
input_max_height='280px',
)
div_selected = Div(text="")
def callback(attr, old, new):
div_selected.text = "You have selected: {!r}".format(selectize_select.value)
selectize_select.on_change('value', callback)
def change_new_value():
new_value = random.choice(DATA[SELECTED_DATA]['email'])
selectize_select.value = new_value
button_change_value = Button(label="Set new value")
button_change_value.on_click(change_new_value)
w = WidgetBox(selectize_select, button, button_change_value, div_selected,
width=500)
curdoc().add_root(w)
|
{
"content_hash": "fcae9750578edebcbc3014f609963ad4",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 108,
"avg_line_length": 35.72727272727273,
"alnum_prop": 0.5619168787107719,
"repo_name": "lukauskas/bokeh-selectize",
"id": "876de20c9d9c37fa9c7680e12737063094b428ad",
"size": "2358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9933"
},
{
"name": "TypeScript",
"bytes": "7033"
}
],
"symlink_target": ""
}
|
from __future__ import division
import os
import sys
import time
import csv
import shutil
import threading
import errno
import tempfile
from distutils.version import LooseVersion
from wlauto import Instrument, Parameter, IterationResult
from wlauto.instrumentation import instrument_is_installed
from wlauto.exceptions import (InstrumentError, WorkerThreadError, ConfigError,
DeviceNotRespondingError, TimeoutError)
from wlauto.utils.types import boolean, numeric
try:
import pandas as pd
except ImportError:
pd = None
VSYNC_INTERVAL = 16666667
EPSYLON = 0.0001
class FpsInstrument(Instrument):
name = 'fps'
description = """
Measures Frames Per Second (FPS) and associated metrics for a workload's main View.
.. note:: This instrument depends on pandas Python library (which is not part of standard
WA dependencies), so you will need to install that first, before you can use it.
    The view is specified by the workload as the ``view`` attribute. This defaults
    to ``'SurfaceView'`` for game workloads, and ``None`` for non-game
    workloads (as FPS measurement usually doesn't make sense for them).
Individual workloads may override this.
This instrument adds four metrics to the results:
:FPS: Frames Per Second. This is the frame rate of the workload.
:frames: The total number of frames rendered during the execution of
the workload.
    :janks: The number of "janks" that occurred during execution of the
workload. Janks are sudden shifts in frame rate. They result
in a "stuttery" UI. See http://jankfree.org/jank-busters-io
:not_at_vsync: The number of frames that did not render in a single
vsync cycle.
"""
parameters = [
Parameter('drop_threshold', kind=numeric, default=5,
description='Data points below this FPS will be dropped as they '
'do not constitute "real" gameplay. The assumption '
'being that while actually running, the FPS in the '
'game will not drop below X frames per second, '
'except on loading screens, menus, etc, which '
'should not contribute to FPS calculation. '),
Parameter('keep_raw', kind=boolean, default=False,
description='If set to True, this will keep the raw dumpsys output '
                              'in the results directory (this is mainly used for debugging). '
'Note: frames.csv with collected frames data will always be '
'generated regardless of this setting.'),
Parameter('crash_check', kind=boolean, default=True,
description="""
                      Specifies whether the instrument should check for crashed content by examining
                      frame data. If this is set, ``execution_time`` instrument must also be installed.
                      The check is performed by using the measured FPS and execution time to estimate the expected
                      frame count and comparing that against the measured frame count. If the ratio of
                      measured/expected is too low, then it is assumed that the content has crashed part way
                      through the run. What is "too low" is determined by ``crash_threshold``.
.. note:: This is not 100\% fool-proof. If the crash occurs sufficiently close to
workload's termination, it may not be detected. If this is expected, the
threshold may be adjusted up to compensate.
"""),
Parameter('crash_threshold', kind=float, default=0.7,
description="""
                      Specifies the threshold used to decide whether a measured/expected frame ratio indicates
                      a content crash. E.g. a value of ``0.75`` means that if the number of actual frames counted is
                      more than a quarter lower than expected, it will be treated as a content crash.
"""),
]
clear_command = 'dumpsys SurfaceFlinger --latency-clear '
def __init__(self, device, **kwargs):
super(FpsInstrument, self).__init__(device, **kwargs)
self.collector = None
self.outfile = None
self.is_enabled = True
def validate(self):
if not pd or LooseVersion(pd.__version__) < LooseVersion('0.13.1'):
message = ('fps instrument requires pandas Python package (version 0.13.1 or higher) to be installed.\n'
'You can install it with pip, e.g. "sudo pip install pandas"')
raise InstrumentError(message)
if self.crash_check and not instrument_is_installed('execution_time'):
raise ConfigError('execution_time instrument must be installed in order to check for content crash.')
def setup(self, context):
workload = context.workload
if hasattr(workload, 'view'):
self.outfile = os.path.join(context.output_directory, 'frames.csv')
self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
self.device.execute(self.clear_command)
else:
self.logger.debug('Workload does not contain a view; disabling...')
self.is_enabled = False
def start(self, context):
if self.is_enabled:
self.logger.debug('Starting SurfaceFlinger collection...')
self.collector.start()
def stop(self, context):
if self.is_enabled and self.collector.is_alive():
self.logger.debug('Stopping SurfaceFlinger collection...')
self.collector.stop()
def update_result(self, context):
if self.is_enabled:
data = pd.read_csv(self.outfile)
if not data.empty: # pylint: disable=maybe-no-member
self._update_stats(context, data)
else:
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
context.result.add_metric('janks', 0)
context.result.add_metric('not_at_vsync', 0)
def slow_update_result(self, context):
result = context.result
if result.has_metric('execution_time'):
self.logger.debug('Checking for crashed content.')
exec_time = result['execution_time'].value
fps = result['FPS'].value
frames = result['frame_count'].value
if all([exec_time, fps, frames]):
expected_frames = fps * exec_time
ratio = frames / expected_frames
self.logger.debug('actual/expected frames: {:.2}'.format(ratio))
if ratio < self.crash_threshold:
self.logger.error('Content for {} appears to have crashed.'.format(context.spec.label))
result.status = IterationResult.FAILED
result.add_event('Content crash detected (actual/expected frames: {:.2}).'.format(ratio))
def _update_stats(self, context, data):
vsync_interval = self.collector.refresh_period
actual_present_time_deltas = (data.actual_present_time - data.actual_present_time.shift()).drop(0) # pylint: disable=E1103
vsyncs_to_compose = (actual_present_time_deltas / vsync_interval).apply(lambda x: int(round(x, 0)))
# drop values lower than drop_threshold FPS as real in-game frame
# rate is unlikely to drop below that (except on loading screens
# etc, which should not be factored in frame rate calculation).
keep_filter = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9))) > self.drop_threshold
filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
if not filtered_vsyncs_to_compose.empty:
total_vsyncs = filtered_vsyncs_to_compose.sum()
if total_vsyncs:
frame_count = filtered_vsyncs_to_compose.size
fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
context.result.add_metric('FPS', fps)
context.result.add_metric('frame_count', frame_count)
else:
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
vtc_deltas.index = range(0, vtc_deltas.size)
vtc_deltas = vtc_deltas.drop(0).abs()
janks = vtc_deltas.apply(lambda x: (x > EPSYLON) and 1 or 0).sum()
not_at_vsync = vsyncs_to_compose.apply(lambda x: (abs(x - 1.0) > EPSYLON) and 1 or 0).sum()
context.result.add_metric('janks', janks)
context.result.add_metric('not_at_vsync', not_at_vsync)
else: # no filtered_vsyncs_to_compose
context.result.add_metric('FPS', float('nan'))
context.result.add_metric('frame_count', 0)
context.result.add_metric('janks', 0)
context.result.add_metric('not_at_vsync', 0)
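        # Worked example of the FPS arithmetic above (illustrative): with the
        # default 16666667 ns vsync interval, 120 frames composited over 120
        # vsyncs yield fps = 1e9 * 120 / (16666667 * 120) ~= 60.0.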
class LatencyCollector(threading.Thread):
# Note: the size of the frames buffer for a particular surface is defined
# by NUM_FRAME_RECORDS inside android/services/surfaceflinger/FrameTracker.h.
# At the time of writing, this was hard-coded to 128. So at 60 fps
# (and there is no reason to go above that, as it matches vsync rate
# on pretty much all phones), there is just over 2 seconds' worth of
# frames in there. Hence the sleep time of 2 seconds between dumps.
#command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
command_template = 'dumpsys SurfaceFlinger --latency {}'
def __init__(self, outfile, device, activity, keep_raw, logger):
super(LatencyCollector, self).__init__()
self.outfile = outfile
self.device = device
self.command = self.command_template.format(activity)
self.keep_raw = keep_raw
self.logger = logger
self.stop_signal = threading.Event()
self.frames = []
self.last_ready_time = 0
self.refresh_period = VSYNC_INTERVAL
self.drop_threshold = self.refresh_period * 1000
self.exc = None
self.unresponsive_count = 0
def run(self):
try:
self.logger.debug('SurfaceFlinger collection started.')
self.stop_signal.clear()
fd, temp_file = tempfile.mkstemp()
self.logger.debug('temp file: {}'.format(temp_file))
wfh = os.fdopen(fd, 'wb')
try:
while not self.stop_signal.is_set():
wfh.write(self.device.execute(self.command))
time.sleep(2)
finally:
wfh.close()
# TODO: this can happen after the run during results processing
with open(temp_file) as fh:
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
for line in text.split('\n'):
line = line.strip()
if line:
self._process_trace_line(line)
if self.keep_raw:
raw_file = os.path.join(os.path.dirname(self.outfile), 'surfaceflinger.raw')
shutil.copy(temp_file, raw_file)
os.unlink(temp_file)
except (DeviceNotRespondingError, TimeoutError): # pylint: disable=W0703
raise
except Exception, e: # pylint: disable=W0703
self.logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
self.exc = WorkerThreadError(self.name, sys.exc_info())
self.logger.debug('SurfaceFlinger collection stopped.')
with open(self.outfile, 'w') as wfh:
writer = csv.writer(wfh)
writer.writerow(['desired_present_time', 'actual_present_time', 'frame_ready_time'])
writer.writerows(self.frames)
self.logger.debug('Frames data written.')
def stop(self):
self.stop_signal.set()
self.join()
if self.unresponsive_count:
            message = 'SurfaceFlinger was unresponsive {} times.'.format(self.unresponsive_count)
if self.unresponsive_count > 10:
self.logger.warning(message)
else:
self.logger.debug(message)
if self.exc:
raise self.exc # pylint: disable=E0702
        self.logger.debug('FPS collection complete.')
def _process_trace_line(self, line):
parts = line.split()
if len(parts) == 3:
desired_present_time, actual_present_time, frame_ready_time = map(int, parts)
if frame_ready_time <= self.last_ready_time:
return # duplicate frame
if (frame_ready_time - desired_present_time) > self.drop_threshold:
self.logger.debug('Dropping bogus frame {}.'.format(line))
return # bogus data
self.last_ready_time = frame_ready_time
self.frames.append((desired_present_time, actual_present_time, frame_ready_time))
elif len(parts) == 1:
self.refresh_period = int(parts[0])
self.drop_threshold = self.refresh_period * 10
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
self.unresponsive_count += 1
else:
self.logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
|
{
"content_hash": "ea6161b622b48ee39c194db4b8df36f7",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 131,
"avg_line_length": 48.76512455516014,
"alnum_prop": 0.6046121287309348,
"repo_name": "rockyzhang/workload-automation",
"id": "ecdd1bb61e9dffee027d39c139530690875ac26b",
"size": "14320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wlauto/instrumentation/fps/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35633"
},
{
"name": "HTML",
"bytes": "8402"
},
{
"name": "Java",
"bytes": "91333"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Prolog",
"bytes": "31390"
},
{
"name": "Python",
"bytes": "968893"
},
{
"name": "Shell",
"bytes": "23204"
},
{
"name": "VimL",
"bytes": "901"
}
],
"symlink_target": ""
}
|
import win32security, ntsecuritycon, winnt
class Enum:
def __init__(self, *const_names):
"""Accepts variable number of constant names that can be found in either
win32security, ntsecuritycon, or winnt."""
for const_name in const_names:
try:
const_val=getattr(win32security,const_name)
except AttributeError:
try:
const_val=getattr(ntsecuritycon, const_name)
except AttributeError:
try:
const_val=getattr(winnt, const_name)
except AttributeError:
raise AttributeError('Constant "%s" not found in win32security, ntsecuritycon, or winnt.' %const_name)
setattr(self, const_name, const_val)
def lookup_name(self, const_val):
"""Looks up the name of a particular value."""
for k,v in self.__dict__.items():
if v==const_val:
return k
raise AttributeError('Value %s not found in enum' %const_val)
def lookup_flags(self, flags):
"""Returns the names of all recognized flags in input, and any flags not found in the enum."""
flag_names=[]
unknown_flags=flags
for k,v in self.__dict__.items():
if flags & v == v:
flag_names.append(k)
unknown_flags = unknown_flags & ~v
return flag_names, unknown_flags
TOKEN_INFORMATION_CLASS = Enum(
'TokenUser',
'TokenGroups',
'TokenPrivileges',
'TokenOwner',
'TokenPrimaryGroup',
'TokenDefaultDacl',
'TokenSource',
'TokenType',
'TokenImpersonationLevel',
'TokenStatistics',
'TokenRestrictedSids',
'TokenSessionId',
'TokenGroupsAndPrivileges',
'TokenSessionReference',
'TokenSandBoxInert',
'TokenAuditPolicy',
'TokenOrigin',
'TokenElevationType',
'TokenLinkedToken',
'TokenElevation',
'TokenHasRestrictions',
'TokenAccessInformation',
'TokenVirtualizationAllowed',
'TokenVirtualizationEnabled',
'TokenIntegrityLevel',
'TokenUIAccess',
'TokenMandatoryPolicy',
'TokenLogonSid')
TOKEN_TYPE = Enum(
'TokenPrimary',
'TokenImpersonation')
TOKEN_ELEVATION_TYPE = Enum(
'TokenElevationTypeDefault',
'TokenElevationTypeFull',
'TokenElevationTypeLimited')
POLICY_AUDIT_EVENT_TYPE = Enum(
'AuditCategorySystem',
'AuditCategoryLogon',
'AuditCategoryObjectAccess',
'AuditCategoryPrivilegeUse',
'AuditCategoryDetailedTracking',
'AuditCategoryPolicyChange',
'AuditCategoryAccountManagement',
'AuditCategoryDirectoryServiceAccess',
'AuditCategoryAccountLogon')
POLICY_INFORMATION_CLASS = Enum(
'PolicyAuditLogInformation',
'PolicyAuditEventsInformation',
'PolicyPrimaryDomainInformation',
'PolicyPdAccountInformation',
'PolicyAccountDomainInformation',
'PolicyLsaServerRoleInformation',
'PolicyReplicaSourceInformation',
'PolicyDefaultQuotaInformation',
'PolicyModificationInformation',
'PolicyAuditFullSetInformation',
'PolicyAuditFullQueryInformation',
'PolicyDnsDomainInformation')
POLICY_LSA_SERVER_ROLE = Enum(
'PolicyServerRoleBackup',
'PolicyServerRolePrimary')
## access modes for opening a policy handle - this is not a real enum
POLICY_ACCESS_MODES = Enum(
'POLICY_VIEW_LOCAL_INFORMATION',
'POLICY_VIEW_AUDIT_INFORMATION',
'POLICY_GET_PRIVATE_INFORMATION',
'POLICY_TRUST_ADMIN',
'POLICY_CREATE_ACCOUNT',
'POLICY_CREATE_SECRET',
'POLICY_CREATE_PRIVILEGE',
'POLICY_SET_DEFAULT_QUOTA_LIMITS',
'POLICY_SET_AUDIT_REQUIREMENTS',
'POLICY_AUDIT_LOG_ADMIN',
'POLICY_SERVER_ADMIN',
'POLICY_LOOKUP_NAMES',
'POLICY_NOTIFICATION',
'POLICY_ALL_ACCESS',
'POLICY_READ',
'POLICY_WRITE',
'POLICY_EXECUTE')
## EventAuditingOptions flags - not a real enum
POLICY_AUDIT_EVENT_OPTIONS_FLAGS = Enum(
'POLICY_AUDIT_EVENT_UNCHANGED',
'POLICY_AUDIT_EVENT_SUCCESS',
'POLICY_AUDIT_EVENT_FAILURE',
'POLICY_AUDIT_EVENT_NONE')
# AceType in ACE_HEADER - not a real enum
ACE_TYPE = Enum(
'ACCESS_MIN_MS_ACE_TYPE',
'ACCESS_ALLOWED_ACE_TYPE',
'ACCESS_DENIED_ACE_TYPE',
'SYSTEM_AUDIT_ACE_TYPE',
'SYSTEM_ALARM_ACE_TYPE',
'ACCESS_MAX_MS_V2_ACE_TYPE',
'ACCESS_ALLOWED_COMPOUND_ACE_TYPE',
'ACCESS_MAX_MS_V3_ACE_TYPE',
'ACCESS_MIN_MS_OBJECT_ACE_TYPE',
'ACCESS_ALLOWED_OBJECT_ACE_TYPE',
'ACCESS_DENIED_OBJECT_ACE_TYPE',
'SYSTEM_AUDIT_OBJECT_ACE_TYPE',
'SYSTEM_ALARM_OBJECT_ACE_TYPE',
'ACCESS_MAX_MS_OBJECT_ACE_TYPE',
'ACCESS_MAX_MS_V4_ACE_TYPE',
'ACCESS_MAX_MS_ACE_TYPE',
'ACCESS_ALLOWED_CALLBACK_ACE_TYPE',
'ACCESS_DENIED_CALLBACK_ACE_TYPE',
'ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE',
'ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE',
'SYSTEM_AUDIT_CALLBACK_ACE_TYPE',
'SYSTEM_ALARM_CALLBACK_ACE_TYPE',
'SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE',
'SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE',
'SYSTEM_MANDATORY_LABEL_ACE_TYPE',
'ACCESS_MAX_MS_V5_ACE_TYPE')
#bit flags for AceFlags - not a real enum
ACE_FLAGS = Enum(
'CONTAINER_INHERIT_ACE',
'FAILED_ACCESS_ACE_FLAG',
'INHERIT_ONLY_ACE',
'INHERITED_ACE',
'NO_PROPAGATE_INHERIT_ACE',
'OBJECT_INHERIT_ACE',
'SUCCESSFUL_ACCESS_ACE_FLAG',
'NO_INHERITANCE',
'SUB_CONTAINERS_AND_OBJECTS_INHERIT',
'SUB_CONTAINERS_ONLY_INHERIT',
'SUB_OBJECTS_ONLY_INHERIT')
# used in SetEntriesInAcl - very similar to ACE_TYPE
ACCESS_MODE = Enum(
'NOT_USED_ACCESS',
'GRANT_ACCESS',
'SET_ACCESS',
'DENY_ACCESS',
'REVOKE_ACCESS',
'SET_AUDIT_SUCCESS',
'SET_AUDIT_FAILURE')
# Bit flags in PSECURITY_DESCRIPTOR->Control - not a real enum
SECURITY_DESCRIPTOR_CONTROL_FLAGS = Enum(
'SE_DACL_AUTO_INHERITED', ## win2k and up
'SE_SACL_AUTO_INHERITED', ## win2k and up
'SE_DACL_PROTECTED', ## win2k and up
'SE_SACL_PROTECTED', ## win2k and up
'SE_DACL_DEFAULTED',
'SE_DACL_PRESENT',
'SE_GROUP_DEFAULTED',
'SE_OWNER_DEFAULTED',
'SE_SACL_PRESENT',
'SE_SELF_RELATIVE',
'SE_SACL_DEFAULTED')
# types of SID
SID_NAME_USE = Enum(
'SidTypeUser',
'SidTypeGroup',
'SidTypeDomain',
'SidTypeAlias',
'SidTypeWellKnownGroup',
'SidTypeDeletedAccount',
'SidTypeInvalid',
'SidTypeUnknown',
'SidTypeComputer',
'SidTypeLabel')
## bit flags, not a real enum
TOKEN_ACCESS_PRIVILEGES = Enum(
'TOKEN_ADJUST_DEFAULT',
'TOKEN_ADJUST_GROUPS',
'TOKEN_ADJUST_PRIVILEGES',
'TOKEN_ALL_ACCESS',
'TOKEN_ASSIGN_PRIMARY',
'TOKEN_DUPLICATE',
'TOKEN_EXECUTE',
'TOKEN_IMPERSONATE',
'TOKEN_QUERY',
'TOKEN_QUERY_SOURCE',
'TOKEN_READ',
'TOKEN_WRITE')
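# Usage sketch (illustrative): decompose a combined access mask into flag names.
#   names, unknown = TOKEN_ACCESS_PRIVILEGES.lookup_flags(
#       win32security.TOKEN_QUERY | win32security.TOKEN_ADJUST_PRIVILEGES)
#   # names contains 'TOKEN_QUERY' and 'TOKEN_ADJUST_PRIVILEGES' (order is
#   # not guaranteed); unknown == 0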
SECURITY_IMPERSONATION_LEVEL = Enum(
'SecurityAnonymous',
'SecurityIdentification',
'SecurityImpersonation',
'SecurityDelegation')
POLICY_SERVER_ENABLE_STATE = Enum(
'PolicyServerEnabled',
'PolicyServerDisabled')
POLICY_NOTIFICATION_INFORMATION_CLASS = Enum(
'PolicyNotifyAuditEventsInformation',
'PolicyNotifyAccountDomainInformation',
'PolicyNotifyServerRoleInformation',
'PolicyNotifyDnsDomainInformation',
'PolicyNotifyDomainEfsInformation',
'PolicyNotifyDomainKerberosTicketInformation',
'PolicyNotifyMachineAccountPasswordInformation')
TRUSTED_INFORMATION_CLASS = Enum(
'TrustedDomainNameInformation',
'TrustedControllersInformation',
'TrustedPosixOffsetInformation',
'TrustedPasswordInformation',
'TrustedDomainInformationBasic',
'TrustedDomainInformationEx',
'TrustedDomainAuthInformation',
'TrustedDomainFullInformation',
'TrustedDomainAuthInformationInternal',
'TrustedDomainFullInformationInternal',
'TrustedDomainInformationEx2Internal',
'TrustedDomainFullInformation2Internal')
TRUSTEE_FORM = Enum(
'TRUSTEE_IS_SID',
'TRUSTEE_IS_NAME',
'TRUSTEE_BAD_FORM',
'TRUSTEE_IS_OBJECTS_AND_SID',
'TRUSTEE_IS_OBJECTS_AND_NAME')
TRUSTEE_TYPE = Enum(
'TRUSTEE_IS_UNKNOWN',
'TRUSTEE_IS_USER',
'TRUSTEE_IS_GROUP',
'TRUSTEE_IS_DOMAIN',
'TRUSTEE_IS_ALIAS',
'TRUSTEE_IS_WELL_KNOWN_GROUP',
'TRUSTEE_IS_DELETED',
'TRUSTEE_IS_INVALID',
'TRUSTEE_IS_COMPUTER')
## SE_OBJECT_TYPE - securable objects
SE_OBJECT_TYPE = Enum(
'SE_UNKNOWN_OBJECT_TYPE',
'SE_FILE_OBJECT',
'SE_SERVICE',
'SE_PRINTER',
'SE_REGISTRY_KEY',
'SE_LMSHARE',
'SE_KERNEL_OBJECT',
'SE_WINDOW_OBJECT',
'SE_DS_OBJECT',
'SE_DS_OBJECT_ALL',
'SE_PROVIDER_DEFINED_OBJECT',
'SE_WMIGUID_OBJECT',
'SE_REGISTRY_WOW64_32KEY')
PRIVILEGE_FLAGS = Enum(
'SE_PRIVILEGE_ENABLED_BY_DEFAULT',
'SE_PRIVILEGE_ENABLED',
'SE_PRIVILEGE_USED_FOR_ACCESS')
# Group flags used with TokenGroups
TOKEN_GROUP_ATTRIBUTES = Enum(
'SE_GROUP_MANDATORY',
'SE_GROUP_ENABLED_BY_DEFAULT',
'SE_GROUP_ENABLED',
'SE_GROUP_OWNER',
'SE_GROUP_USE_FOR_DENY_ONLY',
'SE_GROUP_INTEGRITY',
'SE_GROUP_INTEGRITY_ENABLED',
'SE_GROUP_LOGON_ID',
'SE_GROUP_RESOURCE')
# Privilege flags returned by TokenPrivileges
TOKEN_PRIVILEGE_ATTRIBUTES = Enum(
'SE_PRIVILEGE_ENABLED_BY_DEFAULT',
'SE_PRIVILEGE_ENABLED',
'SE_PRIVILEGE_REMOVED',
'SE_PRIVILEGE_USED_FOR_ACCESS')
|
{
"content_hash": "db2f85eaeebe3a202360f263abf495eb",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 126,
"avg_line_length": 29.936708860759495,
"alnum_prop": 0.6593023255813953,
"repo_name": "zooba/PTVS",
"id": "c0b7aff5351c56a239221d4e4a7017969e538949",
"size": "9460",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/Demos/security/security_enums.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
import os.path
import re
import random
import time
from typing import Dict, Any
from kvmagent import kvmagent
from kvmagent.plugins.imagestore import ImageStoreClient
from kvmagent.plugins import mini_fencer
from zstacklib.utils import jsonobject
from zstacklib.utils import http
from zstacklib.utils import log
from zstacklib.utils import shell
from zstacklib.utils import linux
from zstacklib.utils import lock
from zstacklib.utils import lvm
from zstacklib.utils import bash
from zstacklib.utils import drbd
logger = log.get_logger(__name__)
LOCK_FILE = "/var/run/zstack/ministorage.lock"
BACKUP_DIR = "/var/lib/zstack/ministorage/backup"
INIT_TAG = "zs::ministorage::init"
FENCER_TAG = "zs::ministorage::fencer"
MANAGEMENT_TAG = "zs::ministorage::management"
HEARTBEAT_TAG = "zs::ministorage::heartbeat"
VOLUME_TAG = "zs::ministorage::volume"
IMAGE_TAG = "zs::ministorage::image"
DEFAULT_VG_METADATA_SIZE = "1g"
INIT_POOL_RATIO = 0.1
DEFAULT_CHUNK_SIZE = "4194304"
DRBD_START_PORT = 20000
class AgentCmd(object):
def __init__(self):
pass
class AgentRsp(object):
def __init__(self):
self.success = True
self.error = None
self.totalCapacity = None
self.availableCapacity = None
class ConnectCmd(AgentCmd):
@log.sensitive_fields("peerSshPassword", "peerSshUsername")
def __init__(self):
super(ConnectCmd, self).__init__()
self.diskIdentifiers = []
self.forceWipe = False
self.storageNetworkCidr = None
self.fencerAddress = None
self.magementAddress = None
self.peerManagementAddress = None
self.peerSshPassword = None
self.peerSshUsername = None
class ConnectRsp(AgentRsp):
def __init__(self):
super(ConnectRsp, self).__init__()
self.hostUuid = None
self.storageNetworkAddress = None
class VolumeRsp(AgentRsp):
def __init__(self):
super(VolumeRsp, self).__init__()
self.actualSize = None
self.resourceUuid = None
self.localRole = None
self.localDiskStatus = None
self.localNetworkStatus = None
self.remoteRole = None
self.remoteDiskStatus = None
self.remoteNetworkStatus = None
self.minor = None
def _init_from_drbd(self, r):
"""
:type r: drbd.DrbdResource
"""
if not r.minor_allocated():
self.localNetworkStatus = drbd.DrbdNetState.Unconfigured
return
self.actualSize = int(lvm.get_lv_size(r.config.local_host.disk))
self.resourceUuid = r.name
self.localRole = r.get_role()
self.localDiskStatus = r.get_dstate()
self.remoteRole = r.get_remote_role()
self.remoteDiskStatus = r.get_remote_dstate()
self.localNetworkStatus = r.get_cstate()
self.minor = int(r.config.local_host.minor)
class ActiveRsp(VolumeRsp):
def __init__(self):
super(ActiveRsp, self).__init__()
self.snapPath = ""
class CheckBitsRsp(AgentRsp):
existing = False # type: bool
replications = {} # type: Dict[str, ReplicationInformation]
def __init__(self):
super(CheckBitsRsp, self).__init__()
self.existing = False
self.replications = dict()
self.storageNetworkStatus = "Connected"
class ReplicationInformation(object):
diskStatus = None # type: str
networkStatus = None # type: str
role = None # type: str
size = None # type: long
name = None # type: str
minor = None # type: long
def __init__(self):
super(ReplicationInformation, self).__init__()
self.diskStatus = None # type: str
self.networkStatus = None # type: str
self.role = None # type: str
self.size = None # type: long
self.name = None # type: str
self.minor = None # type: long
class GetVolumeSizeRsp(VolumeRsp):
def __init__(self):
super(GetVolumeSizeRsp, self).__init__()
self.size = None
self.actualSize = None
class ResizeVolumeRsp(VolumeRsp):
def __init__(self):
super(ResizeVolumeRsp, self).__init__()
self.size = None
class RetryException(Exception):
pass
class GetBlockDevicesRsp(AgentRsp):
blockDevices = None # type: list[lvm.SharedBlockCandidateStruct]
def __init__(self):
super(GetBlockDevicesRsp, self).__init__()
self.blockDevices = None
class ConvertVolumeProvisioningRsp(AgentRsp):
actualSize = None # type: int
def __init__(self):
super(ConvertVolumeProvisioningRsp, self).__init__()
self.actualSize = 0
class RevertVolumeFromSnapshotRsp(VolumeRsp):
def __init__(self):
super(RevertVolumeFromSnapshotRsp, self).__init__()
self.newVolumeInstallPath = None
self.size = None
class GetQCOW2ReferenceRsp(AgentRsp):
def __init__(self):
super(GetQCOW2ReferenceRsp, self).__init__()
self.referencePaths = None
class UploadBitsToFileSystemRsp(AgentRsp):
def __init__(self):
super(UploadBitsToFileSystemRsp, self).__init__()
self.totalSize = 0
class DownloadBitsFromFileSystemRsp(AgentRsp):
def __init__(self):
super(DownloadBitsFromFileSystemRsp, self).__init__()
self.downloadedInfos = [] # type: list[LvInfo]
class LvInfo(object):
def __init__(self, install_path, size):
self.installPath = install_path
self.size = size
def get_absolute_path_from_install_path(path):
if path is None:
raise Exception("install path can not be null")
return path.replace("mini:/", "/dev")
def get_primary_storage_uuid_from_install_path(path):
# type: (str) -> str
if path is None:
raise Exception("install path can not be null")
return path.split("/")[2]
class CheckDisk(object):
def __init__(self, identifier):
self.identifier = identifier
@bash.in_bash
def check_disk_by_path(self):
if bash.bash_r("ls %s" % self.identifier) == 0:
return self.identifier
return None
def get_path(self):
o = self.check_disk_by_path()
if o is not None:
return o
raise Exception("can not find disk with %s as wwid, uuid or wwn, "
"or multiple disks qualify but no mpath device found" % self.identifier)
@bash.in_bash
def rescan(self, disk_name=None):
"""
:type disk_name: str
"""
if disk_name is None:
disk_name = self.get_path().split("/")[-1]
def rescan_slave(slave, raise_exception=True):
_cmd = shell.ShellCmd("echo 1 > /sys/block/%s/device/rescan" % slave)
_cmd(is_exception=raise_exception)
logger.debug("rescaned disk %s (wwid: %s), return code: %s, stdout %s, stderr: %s" %
(slave, self.identifier, _cmd.return_code, _cmd.stdout, _cmd.stderr))
multipath_dev = lvm.get_multipath_dmname(disk_name)
if multipath_dev:
t, disk_name = disk_name, multipath_dev
# disk name is dm-xx when multi path
slaves = shell.call("ls /sys/class/block/%s/slaves/" % disk_name).strip().split("\n")
if slaves is None or len(slaves) == 0 or (len(slaves) == 1 and slaves[0].strip() == ""):
logger.debug("can not get any slaves of multipath device %s" % disk_name)
rescan_slave(disk_name, False)
else:
for s in slaves:
rescan_slave(s)
cmd = shell.ShellCmd("multipathd resize map %s" % disk_name)
cmd(is_exception=True)
logger.debug("resized multipath device %s, return code: %s, stdout %s, stderr: %s" %
(disk_name, cmd.return_code, cmd.stdout, cmd.stderr))
disk_name = t
else:
rescan_slave(disk_name)
command = "pvresize /dev/%s" % disk_name
if multipath_dev is not None and multipath_dev != disk_name:
command = "pvresize /dev/%s || pvresize /dev/%s" % (disk_name, multipath_dev)
r, o, e = bash.bash_roe(command, errorout=True)
logger.debug("resized pv %s (wwid: %s), return code: %s, stdout %s, stderr: %s" %
(disk_name, self.identifier, r, o, e))
class MiniFileConverter(linux.AbstractFileConverter):
def __init__(self, cmd=None):
super(MiniFileConverter, self).__init__()
self.cmd = cmd
self.resourceId = os.path.basename(cmd.srcInstallPath) if cmd else None
def get_backing_file(self, path):
return linux.qcow2_direct_get_backing_file(path)
def convert_from_file_with_backing(self, src, dst, dst_backing, backing_fmt):
# type: (str, str, str, str) -> int
if self.cmd.srcInstallPath != src: # base:
return self._convert_image_from_file(src, dst, dst_backing, backing_fmt)
else: # top
return self._convert_volume_from_file(src, dst, dst_backing, backing_fmt)
def convert_to_file(self, src, dst):
drbd_res = drbd.DrbdResource(os.path.basename(src))
if drbd_res.exists:
drbd_res.dd_out(dst)
else:
if not os.path.exists(src):
lvm.active_lv(src, shared=True)
shell.call('dd if=%s of=%s conv=sparse bs=1M' % (src, dst))
def get_size(self, path):
# type: (str) -> int
return lvm.get_lv_size(path)
def exists(self, path):
# type: (str) -> bool
return os.path.exists(path)
def _convert_volume_from_file(self, src, dst, dst_backing, backing_fmt):
drbdResource = drbd.DrbdResource(os.path.basename(dst), False)
if not drbdResource.exists or drbdResource.config.local_host.minor != str(self.cmd.local_host_port - DRBD_START_PORT):
drbdResource.config.local_host.hostname = self.cmd.local_host_name
drbdResource.config.local_host.disk = dst
drbdResource.config.local_host.minor = self.cmd.local_host_port - DRBD_START_PORT
drbdResource.config.local_host.address = "%s:%s" % (self.cmd.local_address, self.cmd.local_host_port)
drbdResource.config.remote_host.hostname = self.cmd.remote_host_name
drbdResource.config.remote_host.disk = dst
drbdResource.config.remote_host.minor = self.cmd.remote_host_port - DRBD_START_PORT
drbdResource.config.remote_host.address = "%s:%s" % (self.cmd.remote_address, self.cmd.remote_host_port)
drbdResource.config.write_config()
size = linux.qcow2_get_virtual_size(src)
tag = "%s::%s::%s" % (VOLUME_TAG, self.cmd.hostUuid, time.time())
try:
if not lvm.lv_exists(dst):
lvm.create_lv_from_cmd(dst, size, self.cmd, tag, False)
lvm.active_lv(dst)
drbdResource.initialize_with_file(True, src, backing=dst_backing, backing_fmt=backing_fmt)
return size
except Exception as e:
drbdResource.destroy()
lvm.delete_lv(dst)
logger.debug('failed to convert lv from file[size:%s] at %s' % (size, dst))
raise e
def _convert_image_from_file(self, src, dst, dst_backing, backing_fmt):
try:
size = linux.qcow2_measure_required_size(src)
except Exception as e:
logger.warn("can not get qcow2 %s measure size: %s" % (src, e))
size = linux.get_local_file_size(src)
tag = "%s::%s::%s" % (IMAGE_TAG, self.cmd.hostUuid, time.time())
if not lvm.lv_exists(dst):
lvm.create_lv_from_cmd(dst, size, self.cmd, tag, False)
lvm.active_lv(dst)
bash.bash_errorout('dd if=%s of=%s bs=1M' % (src, dst))
if dst_backing:
linux.qcow2_rebase_no_check(dst_backing, dst, backing_fmt=backing_fmt)
return size
class MiniStoragePlugin(kvmagent.KvmAgent):
CONNECT_PATH = "/ministorage/connect"
DISCONNECT_PATH = "/ministorage/disconnect"
CREATE_VOLUME_FROM_CACHE_PATH = "/ministorage/createrootvolume"
DELETE_BITS_PATH = "/ministorage/bits/delete"
CREATE_TEMPLATE_FROM_VOLUME_PATH = "/ministorage/createtemplatefromvolume"
UPLOAD_BITS_TO_IMAGESTORE_PATH = "/ministorage/imagestore/upload"
COMMIT_BITS_TO_IMAGESTORE_PATH = "/ministorage/imagestore/commit"
DOWNLOAD_BITS_FROM_IMAGESTORE_PATH = "/ministorage/imagestore/download"
CREATE_EMPTY_VOLUME_PATH = "/ministorage/volume/createempty"
CREATE_SECONDARY_VOLUME = "/ministorage/volume/createsecondary"
CREATE_EMPTY_CACHE_VOLUME_PATH = "/ministorage/cachevolume/createempty"
CHECK_BITS_PATH = "/ministorage/bits/check"
RESIZE_VOLUME_PATH = "/ministorage/volume/resize"
CONVERT_IMAGE_TO_VOLUME = "/ministorage/image/tovolume"
CHANGE_VOLUME_ACTIVE_PATH = "/ministorage/volume/active"
GET_VOLUME_SIZE_PATH = "/ministorage/volume/getsize"
CHECK_DISKS_PATH = "/ministorage/disks/check"
MIGRATE_DATA_PATH = "/ministorage/volume/migrate"
REVERT_VOLUME_FROM_SNAPSHOT_PATH = "/ministorage/volume/revertfromsnapshot"
GET_QCOW2_REFERENCE = "/ministorage/getqcow2reference"
FLUSH_CACHE = "/ministorage/cache/flush"
UPLOAD_BITS_TO_FILESYSTEM_PATH = "/ministorage/filesystem/upload"
DOWNLOAD_BITS_FROM_FILESYSTEM_PATH = "/ministorage/filesystem/download"
SYNC_BACKING_CHAIN = "/ministorage/volume/syncbackingchain"
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.CONNECT_PATH, self.connect, cmd=ConnectCmd())
http_server.register_async_uri(self.DISCONNECT_PATH, self.disconnect)
http_server.register_async_uri(self.CREATE_VOLUME_FROM_CACHE_PATH, self.create_root_volume)
http_server.register_async_uri(self.DELETE_BITS_PATH, self.delete_bits)
http_server.register_async_uri(self.CREATE_TEMPLATE_FROM_VOLUME_PATH, self.create_template_from_volume)
http_server.register_async_uri(self.UPLOAD_BITS_TO_IMAGESTORE_PATH, self.upload_to_imagestore)
http_server.register_async_uri(self.COMMIT_BITS_TO_IMAGESTORE_PATH, self.commit_to_imagestore)
http_server.register_async_uri(self.DOWNLOAD_BITS_FROM_IMAGESTORE_PATH, self.download_from_imagestore)
http_server.register_async_uri(self.CREATE_EMPTY_VOLUME_PATH, self.create_empty_volume)
http_server.register_async_uri(self.CREATE_SECONDARY_VOLUME, self.create_secondary_volume)
http_server.register_async_uri(self.CREATE_EMPTY_CACHE_VOLUME_PATH, self.create_empty_cache_volume)
http_server.register_async_uri(self.CONVERT_IMAGE_TO_VOLUME, self.convert_image_to_volume)
http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
http_server.register_async_uri(self.CHANGE_VOLUME_ACTIVE_PATH, self.active_lv)
http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
http_server.register_async_uri(self.CHECK_DISKS_PATH, self.check_disks)
http_server.register_async_uri(self.REVERT_VOLUME_FROM_SNAPSHOT_PATH, self.revert_volume_from_snapshot)
http_server.register_async_uri(self.GET_QCOW2_REFERENCE, self.get_qcow2_reference)
http_server.register_async_uri(self.FLUSH_CACHE, self.flush_cache)
http_server.register_async_uri(self.UPLOAD_BITS_TO_FILESYSTEM_PATH, self.upload_to_filesystem)
http_server.register_async_uri(self.DOWNLOAD_BITS_FROM_FILESYSTEM_PATH, self.download_from_filesystem)
http_server.register_async_uri(self.SYNC_BACKING_CHAIN, self.sync_backing_chain)
self.imagestore_client = ImageStoreClient()
def stop(self):
pass
@kvmagent.replyerror
def check_disks(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentRsp()
for diskId in cmd.diskIdentifiers:
disk = CheckDisk(diskId)
path = disk.get_path()
if cmd.rescan:
disk.rescan(path.split("/")[-1])
if cmd.failIfNoPath:
linux.set_fail_if_no_path()
if cmd.vgUuid is not None and lvm.vg_exists(cmd.vgUuid):
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid, False)
return jsonobject.dumps(rsp)
@staticmethod
@bash.in_bash
def create_thin_pool_if_not_found(vgUuid, init_pool_ratio):
def round_sector(size, sector):
return round(float(size) / float(sector)) * sector
if lvm.lv_exists("/dev/%s/%s_thinpool" % (vgUuid, vgUuid)):
return
tot, avil = lvm.get_vg_size(vgUuid)
init_pool_size = float(tot) * float(init_pool_ratio)
# meta_size = "%s" % ((tot / DEFAULT_CHUNK_SIZE) * 48 * 2) # ref: https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt
meta_size = 1024**3 # ref: https://www.systutorials.com/docs/linux/man/7-lvmthin/#lbBD
bash.bash_errorout("lvcreate --type thin-pool -L %sB -c %sB --poolmetadatasize %sB -n %s_thinpool %s" %
(int(round_sector(init_pool_size, 4096)), DEFAULT_CHUNK_SIZE, meta_size, vgUuid, vgUuid))
@staticmethod
def create_vg_if_not_found(vgUuid, diskPaths, hostUuid, forceWipe=False):
@linux.retry(times=5, sleep_time=random.uniform(0.1, 3))
def find_vg(vgUuid, raise_exception=True):
cmd = shell.ShellCmd("timeout 5 vgscan --ignorelockingfailure; vgs --nolocking %s -otags | grep %s" % (vgUuid, INIT_TAG))
cmd(is_exception=False)
if cmd.return_code != 0 and raise_exception:
raise RetryException("can not find vg %s with tag %s" % (vgUuid, INIT_TAG))
elif cmd.return_code != 0:
return False
return True
try:
find_vg(vgUuid)
except RetryException as e:
if forceWipe is True:
running_vm = bash.bash_o("virsh list | grep -E 'running|paused' | awk '{print $2}'").strip().split()
if running_vm != [] and running_vm[0] != "":
for vm in running_vm:
bash.bash_r("virsh destroy %s" % vm)
r = bash.bash_r("drbdadm down all")
if r == 0:
bash.bash_r("mkdir -p %s" % BACKUP_DIR)
bash.bash_r("mv /etc/drbd.d/*.res %s" % BACKUP_DIR)
mini_cache_volume_mount_dir = "/var/lib/zstack/colo/cachevolumes/"
linux.umount_by_path(mini_cache_volume_mount_dir)
linux.rm_dir_force(mini_cache_volume_mount_dir)
lvm.wipe_fs(diskPaths, vgUuid)
cmd = shell.ShellCmd("vgcreate -qq --addtag '%s::%s::%s::%s' --metadatasize %s %s %s" %
(INIT_TAG, hostUuid, time.time(), bash.bash_o("hostname").strip(),
DEFAULT_VG_METADATA_SIZE, vgUuid, " ".join(diskPaths)))
cmd(is_exception=False)
logger.debug("created vg %s, ret: %s, stdout: %s, stderr: %s" %
(vgUuid, cmd.return_code, cmd.stdout, cmd.stderr))
if cmd.return_code == 0 and find_vg(vgUuid, False) is True:
return True
try:
if find_vg(vgUuid) is True:
return True
except RetryException as ee:
raise Exception("can not find vg %s with disks: %s and create vg return: %s %s %s " %
(vgUuid, diskPaths, cmd.return_code, cmd.stdout, cmd.stderr))
except Exception as ee:
raise ee
except Exception as e:
raise e
return False
@kvmagent.replyerror
@lock.file_lock(LOCK_FILE, debug=True)
def connect(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ConnectRsp()
diskPaths = set()
def config_lvm(enableLvmetad=False):
lvm.backup_lvm_config()
lvm.reset_lvm_conf_default()
if enableLvmetad:
lvm.config_lvm_by_sed("use_lvmetad", "use_lvmetad=1", ["lvm.conf", "lvmlocal.conf"])
else:
lvm.config_lvm_by_sed("use_lvmetad", "use_lvmetad=0", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_by_sed("issue_discards", "issue_discards=1", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_by_sed("reserved_stack", "reserved_stack=256", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_by_sed("reserved_memory", "reserved_memory=131072", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_by_sed("thin_pool_autoextend_threshold", "thin_pool_autoextend_threshold=80", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_by_sed("snapshot_autoextend_threshold", "snapshot_autoextend_threshold=80", ["lvm.conf", "lvmlocal.conf"])
lvm.config_lvm_filter(["lvm.conf", "lvmlocal.conf"], True)
def config_drbd():
bash.bash_r("sed -i 's/usage-count yes/usage-count no/g' /etc/drbd.d/global_common.conf")
drbd.install_drbd()
config_lvm()
config_drbd()
for diskId in cmd.diskIdentifiers:
disk = CheckDisk(diskId)
diskPaths.add(disk.get_path())
logger.debug("find/create vg %s ..." % cmd.vgUuid)
self.create_vg_if_not_found(cmd.vgUuid, diskPaths, cmd.hostUuid, cmd.forceWipe)
self.create_thin_pool_if_not_found(cmd.vgUuid, INIT_POOL_RATIO)
drbd.up_all_resouces()
if lvm.lvm_check_operation(cmd.vgUuid) is False:
logger.warn("lvm operation test failed!")
lvm.clean_vg_exists_host_tags(cmd.vgUuid, cmd.hostUuid, HEARTBEAT_TAG)
lvm.add_vg_tag(cmd.vgUuid, "%s::%s::%s::%s" % (HEARTBEAT_TAG, cmd.hostUuid, time.time(), bash.bash_o('hostname').strip()))
if cmd.fencerAddress:
lvm.clean_vg_exists_host_tags(cmd.vgUuid, '\'\'', FENCER_TAG)
lvm.add_vg_tag(cmd.vgUuid, "%s::%s" % (FENCER_TAG, cmd.fencerAddress))
lvm.clean_vg_exists_host_tags(cmd.vgUuid, '\'\'', MANAGEMENT_TAG)
lvm.add_vg_tag(cmd.vgUuid, "%s::%s" % (MANAGEMENT_TAG, cmd.magementAddress))
self.generate_fencer(cmd.peerManagementAddress, cmd.peerSshUsername, cmd.peerSshPassword)
if cmd.storageNetworkCidr is not None:
nics = linux.get_nics_by_cidr(cmd.storageNetworkCidr)
if len(nics) != 0:
rsp.storageNetworkAddress = nics[0].values()[0]
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
rsp.vgLvmUuid = lvm.get_vg_lvm_uuid(cmd.vgUuid)
rsp.hostUuid = cmd.hostUuid
logger.debug("mini primary storage[uuid: %s] on host[uuid: %s] connected" % (cmd.vgUuid, cmd.hostUuid))
return jsonobject.dumps(rsp)
@staticmethod
@bash.in_bash
def generate_fencer(peer_addr, peer_username, peer_password):
def configure_ssh_key():
bash.bash_roe("/bin/rm %s*" % mini_fencer.MINI_FENCER_KEY)
bash.bash_roe("ssh-keygen -P \"\" -f %s" % mini_fencer.MINI_FENCER_KEY)
ssh_pswd_file = linux.write_to_temp_file(peer_password)
r, o, e = bash.bash_roe("sshpass -f %s ssh-copy-id -i %s %s@%s" % (ssh_pswd_file, mini_fencer.MINI_FENCER_KEY, peer_username, peer_addr))
        os.remove(ssh_pswd_file)  # clean up the temporary password file
if r == 0:
return
configure_ssh_key()
current_dir = os.path.split(os.path.realpath(__file__))[0]
fencer_path = "%s/mini_fencer.py" % current_dir
bash.bash_roe("sed -i 's/^PEER_USERNAME = .*$/PEER_USERNAME = \"%s\"/g' %s" % (peer_username, fencer_path))
bash.bash_roe("sed -i 's/^PEER_MGMT_ADDR = .*$/PEER_MGMT_ADDR = \"%s\"/g' %s" % (peer_addr, fencer_path))
bash.bash_roe("cp %s /usr/lib/drbd/mini_fencer.py" % fencer_path)
linux.sync_file(fencer_path)
linux.sync_file("/usr/lib/drbd/mini_fencer.py")
os.chmod("/usr/lib/drbd/mini_fencer.py", 0o755)
@kvmagent.replyerror
@lock.file_lock(LOCK_FILE)
def disconnect(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentRsp()
@linux.retry(times=3, sleep_time=random.uniform(0.1, 3))
def find_vg(vgUuid):
cmd = shell.ShellCmd("vgs --nolocking %s -otags | grep %s" % (vgUuid, INIT_TAG))
cmd(is_exception=False)
if cmd.return_code == 0:
return True
logger.debug("can not find vg %s with tag %s" % (vgUuid, INIT_TAG))
cmd = shell.ShellCmd("vgs %s" % vgUuid)
cmd(is_exception=False)
if cmd.return_code == 0:
logger.warn("found vg %s without tag %s" % (vgUuid, INIT_TAG))
return True
raise RetryException("can not find vg %s with or without tag %s" % (vgUuid, INIT_TAG))
@linux.retry(times=3, sleep_time=random.uniform(0.1, 3))
def deactive_drbd_resouces_on_vg(vgUuid):
active_lvs = lvm.list_local_active_lvs(vgUuid)
if len(active_lvs) == 0:
return
drbd_resources = [drbd.DrbdResource(lv.split("/")[-1]) for lv in active_lvs]
for r in drbd_resources:
r.destroy()
logger.warn("active lvs %s will be deactivate" % active_lvs)
lvm.deactive_lv(vgUuid)
active_lvs = lvm.list_local_active_lvs(vgUuid)
if len(active_lvs) != 0:
raise RetryException("lvs [%s] still active, retry deactive again" % active_lvs)
try:
find_vg(cmd.vgUuid)
except RetryException:
logger.debug("can not find vg %s; return success" % cmd.vgUuid)
return jsonobject.dumps(rsp)
except Exception as e:
raise e
deactive_drbd_resouces_on_vg(cmd.vgUuid)
lvm.clean_vg_exists_host_tags(cmd.vgUuid, cmd.hostUuid, HEARTBEAT_TAG)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
@lock.file_lock(LOCK_FILE)
def add_disk(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
disk = CheckDisk(cmd.diskUuid)
command = shell.ShellCmd("vgs --nolocking %s -otags | grep %s" % (cmd.vgUuid, INIT_TAG))
command(is_exception=False)
if command.return_code != 0:
self.create_vg_if_not_found(cmd.vgUuid, [disk.get_path()], cmd.hostUuid, cmd.forceWipe)
else:
if cmd.forceWipe is True:
lvm.wipe_fs([disk.get_path()], cmd.vgUuid)
lvm.add_pv(cmd.vgUuid, disk.get_path(), DEFAULT_VG_METADATA_SIZE)
        rsp = AgentRsp()
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def resize_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
rsp = ResizeVolumeRsp()
if not cmd.drbd:
lvm.resize_lv_from_cmd(install_abs_path, cmd.size, cmd)
return jsonobject.dumps(rsp)
r = drbd.DrbdResource(cmd.installPath.split("/")[-1])
r._init_from_disk(install_abs_path)
with drbd.OperateDrbd(r):
r.resize()
with drbd.OperateDrbd(r):
fmt = linux.get_img_fmt(r.get_dev_path())
if not cmd.live and fmt == 'qcow2':
shell.call("qemu-img resize %s %s" % (r.get_dev_path(), cmd.size))
ret = linux.qcow2_virtualsize(r.get_dev_path())
rsp.size = ret
rsp._init_from_drbd(r)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_root_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
template_abs_path_cache = get_absolute_path_from_install_path(cmd.templatePathInCache)
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
drbdResource = drbd.DrbdResource(self.get_name_from_installPath(cmd.installPath), False)
drbdResource.config.local_host.hostname = cmd.local_host_name
drbdResource.config.local_host.disk = install_abs_path
drbdResource.config.local_host.minor = cmd.local_host_port - DRBD_START_PORT
drbdResource.config.local_host.address = "%s:%s" % (cmd.local_address, cmd.local_host_port)
drbdResource.config.remote_host.hostname = cmd.remote_host_name
drbdResource.config.remote_host.disk = install_abs_path
drbdResource.config.remote_host.minor = cmd.remote_host_port - DRBD_START_PORT
drbdResource.config.remote_host.address = "%s:%s" % (cmd.remote_address, cmd.remote_host_port)
drbdResource.config.write_config()
virtual_size = linux.qcow2_virtualsize(template_abs_path_cache)
try:
lvm.qcow2_lv_recursive_active(template_abs_path_cache, lvm.LvmlockdLockType.SHARE)
if not lvm.lv_exists(install_abs_path):
lvm.create_lv_from_cmd(install_abs_path, virtual_size, cmd,
"%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(install_abs_path)
drbdResource.initialize(cmd.init, cmd, template_abs_path_cache)
except Exception as e:
drbdResource.destroy()
lvm.delete_lv(install_abs_path)
raise e
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def delete_bits(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = AgentRsp()
if cmd.folder:
raise Exception("not support this operation")
self.do_delete_bits(cmd.path)
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
return jsonobject.dumps(rsp)
def do_delete_bits(self, path):
install_abs_path = get_absolute_path_from_install_path(path)
if lvm.has_lv_tag(install_abs_path, IMAGE_TAG):
logger.info('deleting lv image: ' + install_abs_path)
if lvm.lv_exists(install_abs_path):
lvm.delete_image(install_abs_path, IMAGE_TAG, deactive=False)
else:
linux.umount_by_url(install_abs_path)
logger.info('deleting lv volume: ' + install_abs_path)
r = drbd.DrbdResource(self.get_name_from_installPath(path))
if r.exists is True:
r.destroy()
lvm.delete_lv(install_abs_path, deactive=False)
lvm.delete_snapshots(install_abs_path)
@kvmagent.replyerror
def get_qcow2_reference(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetQCOW2ReferenceRsp()
rsp.referencePaths = []
real_path = get_absolute_path_from_install_path(cmd.path)
for f in lvm.list_local_active_lvs(cmd.vgUuid):
try:
backing_file = linux.qcow2_direct_get_backing_file(f)
if backing_file in [real_path, cmd.path]:
rsp.referencePaths.append(f)
except Exception as e:
logger.warn(e)
continue
for f in bash.bash_o("ls -l /dev/drbd* | grep -E '^b' | awk '{print $NF}'").splitlines():
f = f.strip()
if f == "":
continue
try:
if linux.qcow2_get_backing_file(f) in [real_path, cmd.path]:
rsp.referencePaths.append(f)
except Exception as e:
logger.warn(e)
continue
logger.debug("find qcow2 %s referencess: %s" % (real_path, rsp.referencePaths))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def flush_cache(self, req):
shell.call("/opt/MegaRAID/MegaCli/MegaCli64 -AdpCacheFlush -aAll")
return jsonobject.dumps(AgentRsp())
@kvmagent.replyerror
def create_template_from_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
volume_abs_path = get_absolute_path_from_install_path(cmd.volumePath)
snap_name = cmd.installPath.split("/")[-1]
r = drbd.DrbdResource(volume_abs_path.split("/")[-1])
with drbd.OperateDrbd(r):
lvm.create_lvm_snapshot(volume_abs_path, snapName=snap_name, drbd_path=r.get_dev_path())
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
return self.imagestore_client.upload_to_imagestore(cmd, req)
@kvmagent.replyerror
def commit_to_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
return self.imagestore_client.commit_to_imagestore(cmd, req)
@kvmagent.replyerror
def download_from_imagestore(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
if cmd.size is not None and cmd.provisioning is not None:
lvm.create_lv_from_cmd(self.convertInstallPathToAbsolute(cmd.primaryStorageInstallPath), cmd.size, cmd,
"%s::%s::%s" % (IMAGE_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(self.convertInstallPathToAbsolute(cmd.primaryStorageInstallPath))
self.imagestore_client.download_from_imagestore(cmd.mountPoint, cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
rsp = AgentRsp()
return jsonobject.dumps(rsp)
@staticmethod
def convertInstallPathToAbsolute(path):
# type: (str) -> str
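# e.g. "mini://vg0/volume-lv" -> "/dev/vg0/volume-lv" (assuming install paths use the mini:// scheme,
# as the split("/") handling elsewhere in this plugin suggests)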
return path.replace("mini:/", "/dev")
@staticmethod
def convertInstallPathToMount(path):
# type: (str) -> str
return path.replace("mini:/", "/tmp")
@kvmagent.replyerror
def create_empty_cache_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
try:
if not lvm.lv_exists(install_abs_path):
lvm.create_lv_from_cmd(install_abs_path, cmd.size, cmd,
"%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(install_abs_path)
shell.call("mkfs.ext4 -F %s" % install_abs_path)
mountPath = self.convertInstallPathToMount(cmd.installPath)
if not os.path.exists(mountPath):
linux.mkdir(mountPath)
if not linux.is_mounted(cmd.mountPath):
linux.mount(install_abs_path, mountPath)
linux.qcow2_create(mountPath + '/' + mountPath.rsplit('/', 1)[-1], cmd.size)
linux.umount(mountPath)
linux.rmdir_if_empty(mountPath)
lvm.deactive_lv(install_abs_path)
except Exception as e:
lvm.delete_lv(install_abs_path)
logger.debug('failed to create empty volume[uuid:%s, size:%s] at %s' %
(cmd.volumeUuid, cmd.size, cmd.installPath))
raise e
logger.debug('successfully created empty volume[uuid:%s, size:%s] at %s and mounted it' %
(cmd.volumeUuid, cmd.size, cmd.installPath))
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def upload_to_filesystem(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = UploadBitsToFileSystemRsp()
vol_path = self.convertInstallPathToAbsolute(cmd.srcInstallPath)
dst_dir = os.path.dirname(cmd.dstInstallPath)
if not cmd.skipIfExisting:
linux.rm_dir_force(dst_dir)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0755)
linux.upload_chain_to_filesystem(MiniFileConverter(), vol_path, dst_dir, overwrite=not cmd.skipIfExisting)
rsp.totalSize = linux.get_filesystem_folder_size(dst_dir)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def download_from_filesystem(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = DownloadBitsFromFileSystemRsp()
dst_dir = os.path.dirname(self.convertInstallPathToAbsolute(cmd.dstInstallPath))
chain_info = linux.download_chain_from_filesystem(MiniFileConverter(cmd=cmd), cmd.srcInstallPath, dst_dir,
overwrite=not cmd.skipIfExisting)
for info in chain_info:
rsp.downloadedInfos.append(LvInfo(install_path=info[0], size=info[1]))
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def sync_backing_chain(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
remote_hostname = cmd.remoteHostname
# rsync cannot handle this without --keep-links (supposedly similar to --keep-dirlinks), so copy with dd over ssh instead
for info in cmd.backingFileInfos:
if not lvm.lv_exists(info.installPath):
lvm.create_lv_from_cmd(info.installPath, info.size, cmd,
"%s::%s::%s" % (IMAGE_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(info.installPath)
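# stream the backing LV from the peer host over ssh and write it into the local LV at the same path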
sync_command = "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s " \
"dd if=%s bs=1M | dd of=%s bs=1M" % \
(remote_hostname, info.installPath, info.installPath)
shell.call(sync_command)
rsp = AgentRsp()
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_empty_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
drbdResource = drbd.DrbdResource(self.get_name_from_installPath(cmd.installPath), False)
drbdResource.config.local_host.hostname = cmd.local_host_name
drbdResource.config.local_host.disk = install_abs_path
drbdResource.config.local_host.minor = cmd.local_host_port - DRBD_START_PORT
drbdResource.config.local_host.address = "%s:%s" % (cmd.local_address, cmd.local_host_port)
drbdResource.config.remote_host.hostname = cmd.remote_host_name
drbdResource.config.remote_host.disk = install_abs_path
drbdResource.config.remote_host.minor = cmd.remote_host_port - DRBD_START_PORT
drbdResource.config.remote_host.address = "%s:%s" % (cmd.remote_address, cmd.remote_host_port)
drbdResource.config.write_config()
try:
if cmd.backingFile:
backing_abs_path = get_absolute_path_from_install_path(cmd.backingFile)
virtual_size = linux.qcow2_virtualsize(backing_abs_path)
lvm.create_lv_from_cmd(install_abs_path, virtual_size, cmd,
"%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(install_abs_path)
drbdResource.initialize(cmd.init, cmd, backing_abs_path)
elif not lvm.lv_exists(install_abs_path):
lvm.create_lv_from_cmd(install_abs_path, cmd.size, cmd,
"%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(install_abs_path)
drbdResource.initialize(cmd.init, cmd)
except Exception as e:
drbdResource.destroy()
lvm.delete_lv(install_abs_path)
logger.debug('failed to create empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installPath))
raise e
logger.debug('successfully created empty volume[uuid:%s, size:%s] at %s' % (cmd.volumeUuid, cmd.size, cmd.installPath))
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def create_secondary_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
drbdResource = drbd.DrbdResource(self.get_name_from_installPath(cmd.installPath), False)
if not drbdResource.exists or drbdResource.config.local_host.minor != str(cmd.local_host_port - DRBD_START_PORT):
drbdResource.config.local_host.hostname = cmd.local_host_name
drbdResource.config.local_host.disk = install_abs_path
drbdResource.config.local_host.minor = cmd.local_host_port - DRBD_START_PORT
drbdResource.config.local_host.address = "%s:%s" % (cmd.local_address, cmd.local_host_port)
drbdResource.config.remote_host.hostname = cmd.remote_host_name
drbdResource.config.remote_host.disk = install_abs_path
drbdResource.config.remote_host.minor = cmd.remote_host_port - DRBD_START_PORT
drbdResource.config.remote_host.address = "%s:%s" % (cmd.remote_address, cmd.remote_host_port)
drbdResource.config.write_config()
try:
if not lvm.lv_exists(install_abs_path):
lvm.create_lv_from_cmd(install_abs_path, cmd.size, cmd,
"%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()), False)
lvm.active_lv(install_abs_path)
drbdResource.initialize(primary=False, cmd=cmd)
except Exception as e:
drbdResource.destroy()
lvm.delete_lv(install_abs_path)
logger.debug('failed to create secondary volume[uuid:%s, size:%s] at %s' % (cmd.resourceUuid, cmd.size, cmd.installPath))
raise e
logger.debug('successfully created secondary volume[uuid:%s, size:%s] at %s' % (cmd.resourceUuid, cmd.size, cmd.installPath))
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
@staticmethod
def get_name_from_installPath(path):
return path.split("/")[3]
@kvmagent.replyerror
def convert_image_to_volume(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = VolumeRsp()
install_abs_path = get_absolute_path_from_install_path(cmd.primaryStorageInstallPath)
lvm.active_lv(install_abs_path)
lvm.clean_lv_tag(install_abs_path, IMAGE_TAG)
lvm.add_lv_tag(install_abs_path, "%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()))
lvm.delete_lv_meta(install_abs_path)
drbdResource = drbd.DrbdResource(install_abs_path.split("/")[-1], False)
drbdResource.config.local_host.hostname = cmd.local_host_name
drbdResource.config.local_host.disk = install_abs_path
drbdResource.config.local_host.minor = cmd.local_host_port - DRBD_START_PORT
drbdResource.config.local_host.address = "%s:%s" % (cmd.local_address, cmd.local_host_port)
drbdResource.config.remote_host.hostname = cmd.remote_host_name
drbdResource.config.remote_host.disk = install_abs_path
drbdResource.config.remote_host.minor = cmd.remote_host_port - DRBD_START_PORT
drbdResource.config.remote_host.address = "%s:%s" % (cmd.remote_address, cmd.remote_host_port)
drbdResource.config.write_config()
drbdResource.initialize(False, None, skip_clear_bits=cmd.init)
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def check_bits(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = CheckBitsRsp()
if cmd.path is not None:
install_abs_path = get_absolute_path_from_install_path(cmd.path)
rsp.existing = lvm.lv_exists(install_abs_path)
else:
rsp = self.replications_status()
if cmd.vgUuid is not None and lvm.vg_exists(cmd.vgUuid):
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid, False)
if cmd.peerIps is None:
return jsonobject.dumps(rsp)
# successCount = 0
# for ip in cmd.peerIps:
# if self.test_network_ok_to_peer(ip):
# successCount += 1
# if successCount == cmd.peerIps:
rsp.storageNetworkStatus = "Connected"
# elif successCount > 0:
# rsp.storageNetworkStatus = "PartialConnected"
# else:
# rsp.storageNetworkStatus = "Disconnected"
return jsonobject.dumps(rsp)
@staticmethod
def replications_status():
# type: () -> CheckBitsRsp
r = CheckBitsRsp()
raw = linux.read_file("/proc/drbd")
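# each resource line in /proc/drbd looks roughly like
#   "0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate ..."
# so after splitting on spaces: [0] minor, [1] connection state, [2] roles, [3] disk states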
for line in raw.splitlines():
try:
splited = line.strip().split(" ")
if ": cs:" in line:
info = ReplicationInformation()
info.minor = splited[0].split(":")[0]
info.networkStatus = splited[1].split(":")[1]
info.diskStatus = splited[3].split(":")[1].split("/")[0]
info.role = splited[2].split(":")[1].split("/")[0]
configPath = bash.bash_o("grep 'minor %s;' /etc/drbd.d/*.res -l | awk -F '.res' '{print $1}'" % info.minor).strip()
info.name = configPath.split("/")[-1]
size = lvm.get_lv_size(
bash.bash_o("grep -E 'disk .*/dev' %s.res | head -n1 | awk '{print $2}'" % configPath).strip().strip(";"))
if size != '':
info.size = int(size)
r.replications[info.name] = info
except Exception as e:
logger.warn("exception %s when get info of %s" % (e, line))
logger.debug(jsonobject.dumps(r.replications))
return r
@staticmethod
def handle_cache_volume(drbd_role, mount_path, install_abs_path):
if drbd_role == drbd.DrbdRole.Primary:
lvm.active_lv(install_abs_path)
if not os.path.exists(mount_path):
linux.mkdir(mount_path)
if not linux.is_mounted(mount_path, install_abs_path):
linux.mount(install_abs_path, mount_path)
logger.debug("successfully mount %s to %s" % (install_abs_path, mount_path))
else:
if linux.is_mounted(mount_path):
linux.umount(mount_path)
lvm.deactive_lv(install_abs_path)
@kvmagent.replyerror
def active_lv(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = ActiveRsp()
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid, raise_exception=False)
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
if lvm.has_lv_tag(install_abs_path, IMAGE_TAG):
lvm.qcow2_lv_recursive_active(install_abs_path, lvm.LvmlockdLockType.SHARE)
return jsonobject.dumps(rsp)
if cmd.mountPath:
self.handle_cache_volume(cmd.role, cmd.mountPath, install_abs_path)
return jsonobject.dumps(rsp)
drbdResource = drbd.DrbdResource(self.get_name_from_installPath(cmd.installPath))
if cmd.role == drbd.DrbdRole.Secondary:
drbdResource.demote()
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
if drbdResource.exists is False:
raise Exception("can not find volume %s" % cmd.installPath)
if self.test_network_ok_to_peer(drbdResource.config.remote_host.address.split(":")[0]) is False \
and mini_fencer.test_fencer(cmd.vgUuid, drbdResource.name) is False:
raise Exception("can not connect storage network or fencer")
if cmd.checkPeer and drbdResource.get_remote_role() == drbd.DrbdRole.Primary:
raise Exception("remote is also in primary role, can not promote")
if drbdResource.get_dstate() != "UpToDate":
raise Exception("local data is not uptodate, can not promote")
lvm.qcow2_lv_recursive_active(install_abs_path, lvm.LvmlockdLockType.EXCLUSIVE)
try:
drbdResource.promote()
except Exception as e:
if not cmd.force:
raise e
if self.test_network_ok_to_peer(drbdResource.config.remote_host.address.split(":")[0]):
raise Exception("storage network address %s still connected, wont force promote" %
drbdResource.config.remote_host.address.split(":")[0])
if cmd.vmNics:
for vmNic in cmd.vmNics:
if self.test_network_ok_to_peer(vmNic.ipAddress, vmNic.bridgeName):
raise Exception("could arping %s via %s, it may split brain, wont proceed force promote"
% (vmNic.ipAddress, vmNic.bridgeName))
snap_path = None
try:
snap_path = lvm.create_lvm_snapshot(install_abs_path)
drbdResource.promote(True, 2, 2)
rsp.snapPath = snap_path
except Exception as ee:
if snap_path is not None:
lvm.delete_lv(snap_path)
raise ee
rsp._init_from_drbd(drbdResource)
return jsonobject.dumps(rsp)
@staticmethod
@bash.in_bash
def test_network_ok_to_peer(peer_address, via_dev=None):
if not via_dev:
via_dev = bash.bash_o("ip -o r get %s | awk '{print $3}'" % peer_address).strip()
for i in range(5):
recv = bash.bash_r("timeout 2 arping -w 1 -b %s -I %s -c 1" % (peer_address, via_dev))
if recv == 0:
return True
return False
@kvmagent.replyerror
def get_volume_size(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = GetVolumeSizeRsp()
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
r = drbd.DrbdResource(cmd.installPath.split("/")[-1])
with drbd.OperateDrbd(r):
rsp.size = linux.qcow2_virtualsize(r.get_dev_path())
rsp.actualSize = lvm.get_lv_size(install_abs_path)
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
rsp._init_from_drbd(r)
return jsonobject.dumps(rsp)
@kvmagent.replyerror
def revert_volume_from_snapshot(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = RevertVolumeFromSnapshotRsp()
snapshot_abs_path = get_absolute_path_from_install_path(cmd.snapshotInstallPath)
install_abs_path = get_absolute_path_from_install_path(cmd.installPath)
rsp.size = False
rsp.error = "not supported yet!"
rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
return rsp
|
{
"content_hash": "66b48531a613dda23f1b01b0135c9587",
"timestamp": "",
"source": "github",
"line_count": 1185,
"max_line_length": 151,
"avg_line_length": 43.23628691983122,
"alnum_prop": 0.6135259100224456,
"repo_name": "zstackorg/zstack-utility",
"id": "78df340343c3f1c2fc33860394d164aed62e026e",
"size": "51235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kvmagent/kvmagent/plugins/mini_storage_plugin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2093719"
},
{
"name": "Shell",
"bytes": "232075"
}
],
"symlink_target": ""
}
|
def modular_inverse(a, n):
"""
Used for division in elliptic curves. Very important in RSA/ECDSA algorithms.
It uses EGCD.
Extended Euclidean Algorithm:
- http://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
- http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
"""
lm, hm = 1,0
low, high = a%n,n
while low > 1:
ratio = high // low
nm, new = hm-lm*ratio, high-low*ratio
lm, low, hm, high = nm, new, lm, low
return lm % n
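# Illustrative sanity check (not part of the original module):
# modular_inverse(3, 11) == 4, since (3 * 4) % 11 == 1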
|
{
"content_hash": "824b31d9af8d993ac73d4391dd530b28",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 104,
"avg_line_length": 36.06666666666667,
"alnum_prop": 0.6321626617375231,
"repo_name": "LaPlataPy/btcutils",
"id": "79657f3ca28920652293ad3465580f7ccf933f19",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5091"
}
],
"symlink_target": ""
}
|
from i3pystatus import IntervalModule, formatp
class Uptime(IntervalModule):
"""
Outputs Uptime
.. rubric:: Available formatters
* `{days}` - uptime in days
* `{hours}` - rest of uptime in hours
* `{mins}` - rest of uptime in minutes
* `{secs}` - rest of uptime in seconds
* `{uptime}` - deprecated: equals '`{hours}:{mins}`'
"""
settings = (
("format", "Format string"),
("color", "String color"),
("alert", "If you want the string to change color"),
("seconds_alert", "How many seconds necessary to start the alert"),
("color_alert", "Alert color"),
)
file = "/proc/uptime"
format = "up {hours}:{mins}"
color = "#ffffff"
alert = False
seconds_alert = 60 * 60 * 24 * 30 # 30 days
color_alert = "#ff0000"
def run(self):
with open(self.file, "r") as f:
seconds = int(float(f.read().split()[0]))
days = seconds // (60 * 60 * 24)
hours = seconds // (60 * 60)
minutes = seconds // 60
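# the remainders below are only computed for units that actually appear in the format string,
# so e.g. "up {hours}:{mins}" keeps hours as the total number of hours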
if "{days}" in self.format:
hours = (seconds % (60 * 60 * 24)) // (60 * 60)
minutes = (seconds % (60 * 60 * 24)) // 60
seconds = (seconds % (60 * 60 * 24))
if "{hours}" in self.format:
minutes = (seconds % (60 * 60)) // 60
seconds = (seconds % (60 * 60))
if "{mins}" in self.format:
seconds = seconds % 60
fdict = {
"days": days,
"hours": hours,
"mins": minutes,
"secs": seconds,
"uptime": "{}:{}".format(hours, minutes),
}
if self.alert:
if seconds > self.seconds_alert:
self.color = self.color_alert
self.output = {
"full_text": formatp(self.format, **fdict),
"color": self.color
}
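# Illustrative usage in an i3pystatus config (a sketch; the exact registration API is assumed):
#
#     from i3pystatus import Status
#     status = Status()
#     status.register("uptime", format="up {days}d {hours}:{mins}", alert=True)
#     status.run()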
|
{
"content_hash": "34f4e2f7d350e54b56b53aed8149ee8a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 29.825396825396826,
"alnum_prop": 0.49334752527940395,
"repo_name": "Elder-of-Ozone/i3pystatus",
"id": "6027e0e1af767ec88cc9fa250b2ed3b90f126168",
"size": "1880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "i3pystatus/uptime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "228385"
},
{
"name": "Shell",
"bytes": "794"
}
],
"symlink_target": ""
}
|
'''
Created on 2014-11-12
@author:
'''
import unittest
import feedin.engine
import json
import os
class SimpleFileFeedTest(unittest.TestCase):
def setUp(self):
self.engine = feedin.engine.Engine()
def tearDown(self):
pass
def get_conf_file(self, file):
dir = os.path.dirname(__file__)
return os.path.join(dir, file)
def test_start(self):
setting_file = 'sample.feed.json'
file_path = self.get_conf_file(setting_file)
with open(file_path, 'r') as f:
feed_setting = f.read()
feedjob = self.engine.create(feed_setting)
self.assertIsNotNone(feedjob, "feedjob should not be none")
self.assertIsNotNone(feedjob.modules_tree_root, "feedjob should have module")
feedjob.execute()
print feedjob.context.items
for item in feedjob.context.items:
print item.text('read'), item.text('comments'), item.text('title'), \
item.text('url'), item.text('channel'), item.text('author'), item.text("pagedetail")
def test_fetchonly(self):
setting_file = 'fetchonly.feed.json'
file_path = self.get_conf_file(setting_file)
with open(file_path, 'r') as f:
feed_setting = f.read()
feedjob = self.engine.create(feed_setting)
self.assertIsNotNone(feedjob, "feedjob should not be none")
self.assertIsNotNone(feedjob.modules_tree_root, "feedjob should have module")
feedjob.execute()
print feedjob.context.items
def test_loop(self):
setting_file = 'loop.feed.json'
file_path = self.get_conf_file(setting_file)
with open(file_path, 'r') as f:
feed_setting = f.read()
feedjob = self.engine.create(feed_setting)
self.assertIsNotNone(feedjob, "feedjob should not be none")
self.assertIsNotNone(feedjob.modules_tree_root, "feedjob should have module")
feedjob.execute()
print feedjob.context.items
for item in feedjob.context.items:
print item.text('read'), item.text('comments'), item.text('title'), item.text('url'), \
item.text('channel'), item.text('author'), item.text('pagedetail')
if __name__ == "__main__":
import sys;sys.argv = ['', 'SimpleFileFeedTest.test_loop']
unittest.main()
|
{
"content_hash": "207fb7a0eb5cfe734884cb53de58ca84",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 100,
"avg_line_length": 33.65714285714286,
"alnum_prop": 0.616723259762309,
"repo_name": "kevenli/FeedIn",
"id": "26c24317fb11223899d86ce8861182ff1ffa3966",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/simplefilefeed/simplefilefeedtest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "106298"
},
{
"name": "Python",
"bytes": "52804"
}
],
"symlink_target": ""
}
|
import hashlib
def get_md5_value(key):
value = None
try:
value = hashlib.md5(key).hexdigest()
except Exception, e:
print e
return value
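# Illustrative example (not part of the original module):
# get_md5_value("hello") -> "5d41402abc4b2a76b9719d911017c592"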
|
{
"content_hash": "bf1df17086d9230950366e0a7c103670",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 17,
"alnum_prop": 0.6,
"repo_name": "AisinoPythonTeam/PythonAiniso",
"id": "8d6130729a851af4251ebf1ea8a71e21a4adce5a",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ic_crawler/gsgj_phone/gsgj_phone/util/md5_handle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168711"
}
],
"symlink_target": ""
}
|
import lasagne.layers as ll
import theano.tensor as T
from lasagne.objectives import categorical_crossentropy
from lasagne import nonlinearities
from layers import DenseLayerWithReg, Conv2DLayerWithReg
from theano.ifelse import ifelse
import theano
import numpy as np
import time
class MLP(object):
def __init__(self, x, y, args):
self.params_theta = []
self.params_lambda = []
self.params_weight = []
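# params_theta holds all trainable weights and biases, params_weight only the weight matrices,
# and params_lambda the per-layer L1/L2 regularization strengths (kept separate so they can be
# treated as tunable hyperparameters)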
if args.dataset == 'mnist':
input_size = (None, 28*28)
elif args.dataset == 'cifar10':
input_size = (None, 3, 32*32)
else:
raise AssertionError
layers = [ll.InputLayer(input_size)]
penalty = theano.shared(np.array(0.))
for (k, num) in enumerate(args.MLPlayer):
# the last layer should use softmax
if k == len(args.MLPlayer) - 1:
# layers.append(ll.DenseLayer(layers[-1], num, nonlinearity=nonlinearities.softmax))
layers.append(DenseLayerWithReg(args, layers[-1], num_units=num,
nonlinearity=nonlinearities.softmax))
else:
# layers.append(ll.DenseLayer(layers[-1], num))
layers.append(DenseLayerWithReg(args, layers[-1], num_units=num))
if layers[-1].W is not None:
self.params_theta += [layers[-1].W, layers[-1].b]
self.params_weight += [layers[-1].W]
# define new regularization term for a layer
if args.regL2 is True:
tempL2 = layers[-1].L2 * T.sqr(layers[-1].W)
penalty += T.sum(tempL2)
self.params_lambda += [layers[-1].L2]
if args.regL1 is True:
tempL1 = layers[-1].L1 * layers[-1].W
penalty += T.sum(tempL1)
self.params_lambda += [layers[-1].L1]
self.layers = layers
self.y = ll.get_output(layers[-1], x, deterministic=False)
self.prediction = T.argmax(self.y, axis=1)
self.penalty = penalty
# self.penalty = penalty if penalty != 0. else T.constant(0.)
print(self.params_lambda)
# time.sleep(20)
# cost function
self.loss = T.mean(categorical_crossentropy(self.y, y))
self.lossWithPenalty = T.add(self.loss, self.penalty)
print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty)
# self.classError = T.mean(T.cast(T.neq(self.prediction, y), 'float32'))
class ConvNet(object):
def add_params_to_self(self, args, layer):
if layer.W is not None:
self.params_theta += [layer.W, layer.b]
self.params_weight += [layer.W]
# define new regularization term for a layer
if args.regL2 is True:
tempL2 = layer.L2 * T.sqr(layer.W)
self.penalty += T.sum(tempL2)
self.params_lambda += [layer.L2]
if args.regL1 is True:
tempL1 = layer.L1 * layer.W
self.penalty += T.sum(tempL1)
self.params_lambda += [layer.L1]
def __init__(self, x, y, args):
self.params_theta = []
self.params_lambda = []
self.params_weight = []
if args.dataset == 'mnist':
input_size = (None, 1, 28, 28)
elif args.dataset == 'cifar10':
input_size = (None, 3, 32, 32)
else:
raise AssertionError
layers = [ll.InputLayer(input_size)]
self.penalty = theano.shared(np.array(0.))
#conv1
layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
self.add_params_to_self(args, layers[-1])
layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
#conv1
layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
self.add_params_to_self(args, layers[-1])
layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
#fc1
layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
self.add_params_to_self(args, layers[-1])
#softmax
layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
self.add_params_to_self(args, layers[-1])
self.layers = layers
self.y = ll.get_output(layers[-1], x, deterministic=False)
self.prediction = T.argmax(self.y, axis=1)
# self.penalty = penalty if penalty != 0. else T.constant(0.)
print(self.params_lambda)
# time.sleep(20)
# cost function
self.loss = T.mean(categorical_crossentropy(self.y, y))
self.lossWithPenalty = T.add(self.loss, self.penalty)
print "loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty)
|
{
"content_hash": "ca07fc99a2b07b72670ba0e5e79f596e",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 109,
"avg_line_length": 41.810344827586206,
"alnum_prop": 0.5723711340206186,
"repo_name": "bigaidream-projects/drmad",
"id": "70456806cb945b0f5ba084d6f585fb3171fd392e",
"size": "4850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpu_ver/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "386831"
}
],
"symlink_target": ""
}
|
import fourch
import fourdragon
import re
""" THIS IS HOW I'D LIKE THE MODULE TO WORK:
>>> r = Raid("123456789", verbose=True)
Found Boss(name=$name, type=$type, element=$element, difficulty=$difficulty, hp=$hp)
>>> r.sync()
Found Bard(id=$id, ) # unsure what Bards will have
Found Knight(id=$id, ) # same here
Knight(id=$id, ) hit the beast for $n!
Found Healer(id=$id, )
>>> r.sync()
Healer(id=$id, ) revived Knight(id=$id, ).
>>> r.events
# Just a list of things that's happened:
[{"text": "Found boss blah blah", ...},
{"text": "Found Bard $id blah", ...},
{"text": "Found Knight $id blah", ...},
...]
"""
class Raid(object):
min_roll = 11 # less than this: you die
max_revive_times = 6
max_avenge_times = 6
def __init__(self, thread, board="q", verbose=False, debug=False):
self.thread = fourch.board(board).thread(thread)
self.verbose = verbose
self.debug = debug
self.boss = None
self._post_dead = False
self._fresh = True
self._fetch()
self.heroes = {}
self.events = []
def sync(self):
# Post/Boss is dead.
if not self.thread.alive or self.boss.health <= 0:
return
new = self.thread.update()
if not self._fresh and new == 0:
return
if self._fresh:
self._fresh = False
# Slice [-new:] so we just iterate the new posts.
for post in self.thread.replies[-new:]:
hero = self.hero(post)
if hero.dead and not hero.can_attack_when_dead:
# YOU'RE DEAD, DON'T EVEN TRY THAT CRAP.
continue
# TODO: Bard bonus~?
def hero(self, post, **kwargs):
if post.id not in self.heroes:
hero = fourdragon.Hero.from_id(post.id)(**kwargs)
self.heroes[post.id] = hero
else:
hero = self.heroes[post.id]
nickname = self._parse_command("nickname", r"(\w{,14})", post.comment_text)
if nickname is not None:
hero.nickname = nickname
post.roll = self.roll(post.number, 2)
hero.posts.append(post)
return hero
def roll(self, no, n):
"""TODO: improve this.
"""
r = int(str(no)[-n:])
while r == 0:
n += 1
r = int(str(no)[-n:])
return r
def _parse_command(self, name, matcher, comment, flags=re.I):
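# commands are embedded in post text as "name@value", e.g. "difficulty@noob" (see the debug OP in _fetch)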
r = "{0}@{1}".format(name, matcher)
search = re.search(r, comment, flags=flags)
if search:
return search.group(1)
return None
def _fetch(self):
""" Pulls the OP comment from the thread and sets up required
variables (read: the boss) from the post.
"""
if self.debug:
op = ("It's dragon slaying time, bitches!\n"
"SOME INFO BOUT DIS DRAGON:\n"
"difficulty@noob\n"
"name@SomeDude\n"
"element@random")
else:
op = self.thread.op.comment_text
boss = {
"difficulty": self._parse_command("difficulty", r"(noob|easy|medium|hard)", op),
"name": self._parse_command("name", r"(\w+)", op),
"element": self._parse_command("element", r"(random|fire|earth|water|ice|electric)", op),
"health": self._parse_command("health", r"(\d+)", op)
}
self.boss = fourdragon.Boss(**boss)
|
{
"content_hash": "5ef6256fba928c6b9a40b92008cadb6d",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 33.04807692307692,
"alnum_prop": 0.5388420133837649,
"repo_name": "sysr-q/4dragon",
"id": "97d3878e4544750a40dd887d283f006a30eb09f2",
"size": "3461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fourdragon/raid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7593"
}
],
"symlink_target": ""
}
|
from . import AppCase
from app.models.attachment import Attachment
class AttachmentTest(AppCase):
def setUp(self):
self.setup_app()
self.create_project()
self.create_issue()
self.attachment = Attachment(
file_id='12345',
project=self.test_project,
parent=self.test_issue
)
self.attachment.save()
def tearDown(self):
self.teardown_dbs()
def test_pre_delete(self):
self.test_project.attachments.append(self.attachment)
self.test_project.save()
self.test_issue.attachments.append(self.attachment)
self.test_issue.save()
self.assertEqual(Attachment.objects.count(), 1)
self.assertEqual(len(self.test_issue.attachments), 1)
self.assertEqual(len(self.test_project.attachments), 1)
self.attachment.delete()
self.assertEqual(Attachment.objects.count(), 0)
self.assertEqual(len(self.test_issue.attachments), 0)
self.assertEqual(len(self.test_project.attachments), 0)
|
{
"content_hash": "29ba5161dc1b9a9527f75c2981e178c2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 63,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6355140186915887,
"repo_name": "publicscience/hive",
"id": "e0df35031315ec4465677f0968c2435aecb6c107",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/attachment_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45547"
},
{
"name": "JavaScript",
"bytes": "14559"
},
{
"name": "Python",
"bytes": "64421"
},
{
"name": "Shell",
"bytes": "803"
}
],
"symlink_target": ""
}
|
"""
Ensures that with --disable-authentication any client can connect: no certificate, client1, client2, and even a client signed by other_root are all accepted.
"""
from common import LOCALHOST, RootCert, STATUS_PORT, SocketPair, TcpServer, TlsClient, print_ok, run_ghostunnel, terminate
import ssl
if __name__ == "__main__":
ghostunnel = None
try:
# create certs
root = RootCert('root')
root.create_signed_cert('server')
root.create_signed_cert('client1')
root.create_signed_cert('client2')
other_root = RootCert('other_root')
other_root.create_signed_cert('other_client1')
# start ghostunnel
ghostunnel = run_ghostunnel(['server',
'--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST),
'--keystore=server.p12',
'--status={0}:{1}'.format(LOCALHOST,
STATUS_PORT),
'--cacert=root.crt',
'--disable-authentication'])
# connect with no client cert, confirm that the tunnel is up
pair = SocketPair(TlsClient(None, 'root', 13001), TcpServer(13002))
pair.validate_can_send_from_client(
"hello world", "1: client -> server")
pair.validate_can_send_from_server(
"hello world", "1: server -> client")
pair.validate_closing_client_closes_server(
"1: client closed -> server closed")
# connect with client1 cert, confirm that the tunnel is up
pair2 = SocketPair(
TlsClient('client1', 'root', 13001), TcpServer(13002))
pair2.validate_can_send_from_client(
"hello world", "1: client -> server")
pair2.validate_can_send_from_server(
"hello world", "1: server -> client")
pair2.validate_closing_client_closes_server(
"1: client closed -> server closed")
# connect with client2, confirm that the tunnel isn't up
try:
pair = SocketPair(
TlsClient('client2', 'root', 13001), TcpServer(13002))
except ssl.SSLError:
raise Exception(
'rejected unauthenticated client2, despite --disable-authentication')
# connect with other_client1, confirm that the tunnel isn't up
try:
pair = SocketPair(
TlsClient('other_client1', 'root', 13001), TcpServer(13002))
except ssl.SSLError:
raise Exception(
'rejected authenticated other_client1, despite --disable-authentication')
pair.cleanup()
print_ok("OK")
finally:
terminate(ghostunnel)
|
{
"content_hash": "a12bb04aa8770f01508eebf51fd648e1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 122,
"avg_line_length": 41.029411764705884,
"alnum_prop": 0.5491039426523298,
"repo_name": "square/ghostunnel",
"id": "0751180cdd865b62aab42d0307fb3da1ddc5c6ae",
"size": "2814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test-server-disable-authentication.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "803"
},
{
"name": "Go",
"bytes": "178685"
},
{
"name": "Makefile",
"bytes": "3513"
},
{
"name": "Python",
"bytes": "138583"
}
],
"symlink_target": ""
}
|
from django.conf import settings
import sys
__author__ = 'franki'
from django.apps import AppConfig
class ImageConfig(AppConfig):
name = 'image'
label = 'image'
verbose_name = "Image"
# def ready(self):
# if settings.DEBUG and not 'django.template.context_processors.request' in settings.TEMPLATE_CONTEXT_PROCESSORS:
# print >> sys.stderr, \
# "image: Add 'django.template.context_processors.request' to TEMPLATE_CONTEXT_PROCESSORS in order to\n" \
# "give access to sessions from templates. Otherwise set autogen=true in all uses. This message only\n" \
# "appears with DEBUG enabled."
|
{
"content_hash": "0fce0febc66af68fa6e5a68edfc3a98a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 122,
"avg_line_length": 35.578947368421055,
"alnum_prop": 0.658284023668639,
"repo_name": "neerajvashistha/pa-dude",
"id": "c4eaeb348c496828977bdeb3c626420b84fa16ef",
"size": "691",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/image/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
import re
import unittest
from pathlib import Path
from shutil import copyfile, copytree
from tempfile import TemporaryDirectory
import jmespath
import pytest
from parameterized import parameterized
from tests.charts.helm_template_generator import render_chart
CHART_DIR = Path(__file__).parent / ".." / ".." / "chart"
class PodTemplateFileTest(unittest.TestCase):
@classmethod
@pytest.fixture(autouse=True, scope="class")
def isolate_chart(cls):
with TemporaryDirectory() as tmp_dir:
cls.temp_chart_dir = tmp_dir + "/chart"
copytree(CHART_DIR, cls.temp_chart_dir)
copyfile(
cls.temp_chart_dir + "/files/pod-template-file.kubernetes-helm-yaml",
cls.temp_chart_dir + "/templates/pod-template-file.yaml",
)
yield
def test_should_work(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert jmespath.search("spec.containers[0].image", docs[0]) is not None
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_should_add_an_init_container_if_git_sync_is_true(self):
docs = render_chart(
values={
"images": {
"gitSync": {
"repository": "test-registry/test-repo",
"tag": "test-tag",
"pullPolicy": "Always",
}
},
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"wait": 66,
"maxFailures": 70,
"subPath": "path1/path2",
"rev": "HEAD",
"depth": 1,
"repo": "https://github.com/apache/airflow.git",
"branch": "test-branch",
"sshKeySecret": None,
"credentialsSecret": None,
"knownHosts": None,
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {
"name": "git-sync-test-init",
"securityContext": {"runAsUser": 65533},
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
"env": [
{"name": "GIT_SYNC_REV", "value": "HEAD"},
{"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
{"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
{"name": "GIT_SYNC_DEPTH", "value": "1"},
{"name": "GIT_SYNC_ROOT", "value": "/git"},
{"name": "GIT_SYNC_DEST", "value": "repo"},
{"name": "GIT_SYNC_ADD_USER", "value": "true"},
{"name": "GIT_SYNC_WAIT", "value": "66"},
{"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
{"name": "GIT_SYNC_ONE_TIME", "value": "true"},
],
"volumeMounts": [{"mountPath": "/git", "name": "dags"}],
"resources": {},
} == jmespath.search("spec.initContainers[0]", docs[0])
def test_should_not_add_init_container_if_dag_persistence_is_true(self):
docs = render_chart(
values={
"dags": {
"persistence": {"enabled": True},
"gitSync": {"enabled": True},
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert jmespath.search("spec.initContainers", docs[0]) is None
@parameterized.expand(
[
({"gitSync": {"enabled": True}}, True),
({"persistence": {"enabled": True}}, False),
(
{
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
},
True,
),
]
)
def test_dags_mount(self, dag_values, expected_read_only):
docs = render_chart(
values={"dags": dag_values},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": expected_read_only,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_validate_if_ssh_params_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": None,
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"mountPath": "/etc/git-secret/ssh",
"subPath": "gitSshKey",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.volumes", docs[0])
def test_validate_if_ssh_known_hosts_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": "github.com ssh-rsa AAAABdummy",
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "GIT_SSH_KNOWN_HOSTS_FILE",
"value": "/etc/git-secret/known_hosts",
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "config",
"mountPath": "/etc/git-secret/known_hosts",
"subPath": "known_hosts",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
def test_should_set_username_and_pass_env_variables(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"credentialsSecret": "user-pass-secret",
"sshKeySecret": None,
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "GIT_SYNC_USERNAME",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "GIT_SYNC_PASSWORD",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_should_set_the_dags_volume_claim_correctly_when_using_an_existing_claim(self):
docs = render_chart(
values={"dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
"spec.volumes", docs[0]
)
def test_should_use_empty_dir_for_gitsync_without_persistence(self):
docs = render_chart(
values={"dags": {"gitSync": {"enabled": True}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "emptyDir": {}} in jmespath.search("spec.volumes", docs[0])
@parameterized.expand(
[
({"enabled": False}, {"emptyDir": {}}),
({"enabled": True}, {"persistentVolumeClaim": {"claimName": "RELEASE-NAME-logs"}}),
(
{"enabled": True, "existingClaim": "test-claim"},
{"persistentVolumeClaim": {"claimName": "test-claim"}},
),
]
)
def test_logs_persistence_changes_volume(self, log_persistence_values, expected):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "logs", **expected} in jmespath.search("spec.volumes", docs[0])
def test_should_set_a_custom_image_in_pod_template(self):
docs = render_chart(
values={"images": {"pod_template": {"repository": "dummy_image", "tag": "latest"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "dummy_image:latest" == jmespath.search("spec.containers[0].image", docs[0])
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_mount_airflow_cfg(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'} in jmespath.search(
"spec.volumes", docs[0]
)
assert {
'name': 'config',
'mountPath': '/opt/airflow/airflow.cfg',
'subPath': 'airflow.cfg',
'readOnly': True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_should_use_global_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"executor": "KubernetesExecutor",
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"executor": "KubernetesExecutor",
"workers": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "Pod" == jmespath.search("kind", docs[0])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
def test_affinity_tolerations_and_node_selector_precedence(self):
"""When given both global and worker affinity etc, worker affinity etc is used"""
expected_affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
}
docs = render_chart(
values={
"workers": {
"affinity": expected_affinity,
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"type": "ssd"},
},
"affinity": {
"nodeAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [
{
"weight": 1,
"preference": {
"matchExpressions": [
{"key": "not-me", "operator": "In", "values": ["true"]},
]
},
}
]
}
},
"tolerations": [
{"key": "not-me", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"type": "not-me"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert expected_affinity == jmespath.search("spec.affinity", docs[0])
assert "ssd" == jmespath.search(
"spec.nodeSelector.type",
docs[0],
)
tolerations = jmespath.search("spec.tolerations", docs[0])
assert 1 == len(tolerations)
assert "dynamic-pods" == tolerations[0]["key"]
def test_should_not_create_default_affinity(self):
docs = render_chart(show_only=["templates/pod-template-file.yaml"], chart_dir=self.temp_chart_dir)
assert {} == jmespath.search("spec.affinity", docs[0])
def test_should_add_fsgroup_to_the_pod_template(self):
docs = render_chart(
values={"gid": 5000},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
self.assertEqual(5000, jmespath.search("spec.securityContext.fsGroup", docs[0]))
def test_should_create_valid_volume_mount_and_volume(self):
docs = render_chart(
values={
"workers": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
"extraVolumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "test-volume" in jmespath.search(
"spec.volumes[*].name",
docs[0],
)
assert "test-volume" in jmespath.search(
"spec.containers[0].volumeMounts[*].name",
docs[0],
)
def test_should_add_env_for_gitsync(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"env": [{"name": "FOO", "value": "bar"}],
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "FOO", "value": "bar"} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_no_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": None},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
volume_mounts = jmespath.search("spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_airflow_pod_annotations(self):
docs = render_chart(
values={"airflowPodAnnotations": {"my_annotation": "annotated!"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
annotations = jmespath.search("metadata.annotations", docs[0])
assert "my_annotation" in annotations
assert "annotated!" in annotations["my_annotation"]
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"workers": {
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.initContainers[-1]", docs[0])
def test_should_add_extra_containers(self):
docs = render_chart(
values={
"workers": {
"extraContainers": [
{"name": "test-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "test-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.containers[-1]", docs[0])
def test_should_add_pod_labels(self):
docs = render_chart(
values={"labels": {"label1": "value1", "label2": "value2"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"label1": "value1",
"label2": "value2",
"release": "RELEASE-NAME",
"component": "worker",
"tier": "airflow",
} == jmespath.search("metadata.labels", docs[0])
def test_should_add_resources(self):
docs = render_chart(
values={
"workers": {
"resources": {
"requests": {"memory": "2Gi", "cpu": "1"},
"limits": {"memory": "3Gi", "cpu": "2"},
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"limits": {
"cpu": "2",
"memory": "3Gi",
},
"requests": {
"cpu": "1",
"memory": "2Gi",
},
} == jmespath.search("spec.containers[0].resources", docs[0])
def test_empty_resources(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {} == jmespath.search("spec.containers[0].resources", docs[0])
|
{
"content_hash": "4bea39a33a328c655c415fd7e4f0c6fb",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 109,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.4658565579183727,
"repo_name": "lyft/incubator-airflow",
"id": "2ac8927f1cf8a6182943b552ae55c6561d98f54f",
"size": "23498",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/charts/test_pod_template_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
}
|
from flask import request, flash, session, redirect, url_for, render_template, make_response
from flask_blog import db, cache
from flask_blog.user import blueprint
from flask_blog.model.user import User
from flask_blog.core.encrypt import password_validate, newest_encrypt
@blueprint.route("/login", methods=["GET", "POST"])
def login():
""" user login """
errors = []
if request.method == "POST":
# do user login validation
username = request.form.get("username")
password = request.form.get("password")
user = User.query.filter(User.username == username).first()
if user is None:
errors.append("Invalid username!")
else:
validate_result = password_validate(password, user.password)
if True in validate_result:
if "updated" in validate_result:
# password encryption algorithm has changed,
# update the user's password using the newest encryption algorithm
user.password = newest_encrypt(user.password)
db.session.add(user)
db.session.commit()
if user.status == "normal":
flash("Welcome, {0}!".format(username.encode("utf-8")))
session["current_user"] = user
resp = make_response(redirect(url_for("blog.index")))
memorize = request.form.get("memorize")
if memorize == "on":
resp.set_cookie("user_id", str(user.user_id), max_age=7 * 24 * 3600)
else:
resp.set_cookie("user_id", str(user.user_id), max_age=2 * 3600)
cache.clear()
return resp
else:
flash("Sorry, user '{0}' is blocked!".format(user.username.encode("utf-8")))
else:
errors.append("Invalid password!")
return render_template("user/login.html", errors=errors, username=username)
return render_template("user/login.html", errors=errors)
|
{
"content_hash": "a420c7c792dd452e678b3774feee54fa",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 46.733333333333334,
"alnum_prop": 0.5582501188777936,
"repo_name": "PurpleSun/Flask_Blog",
"id": "2f30889e0bf9174658014bf5f4b0c740ff6d2811",
"size": "2150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_blog/user/login.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "78968"
},
{
"name": "JavaScript",
"bytes": "118242"
},
{
"name": "Python",
"bytes": "20962"
}
],
"symlink_target": ""
}
|
import abc
import copy
import datetime
import traceback
import mock
from oslo_context import context
from oslo_utils import timeutils
from oslotest import base
from oslotest import mockpatch
import six
from stevedore import extension
from ceilometer import pipeline
from ceilometer import publisher
from ceilometer.publisher import test as test_publisher
from ceilometer import sample
from ceilometer import transformer
from ceilometer.transformer import accumulator
from ceilometer.transformer import arithmetic
from ceilometer.transformer import conversions
@six.add_metaclass(abc.ABCMeta)
class BasePipelineTestCase(base.BaseTestCase):
@staticmethod
def fake_tem_init():
"""Fake a transformerManager for pipeline.
        The faked entry point settings are listed below:
update: TransformerClass
except: TransformerClassException
drop: TransformerClassDrop
"""
pass
def fake_tem_get_ext(self, name):
class_name_ext = {
'update': self.TransformerClass,
'except': self.TransformerClassException,
'drop': self.TransformerClassDrop,
'cache': accumulator.TransformerAccumulator,
'aggregator': conversions.AggregatorTransformer,
'unit_conversion': conversions.ScalingTransformer,
'rate_of_change': conversions.RateOfChangeTransformer,
'arithmetic': arithmetic.ArithmeticTransformer,
'delta': conversions.DeltaTransformer,
}
if name in class_name_ext:
return extension.Extension(name, None,
class_name_ext[name],
None,
)
raise KeyError(name)
def get_publisher(self, url, namespace=''):
fake_drivers = {'test://': test_publisher.TestPublisher,
'new://': test_publisher.TestPublisher,
'except://': self.PublisherClassException}
return fake_drivers[url](url)
class PublisherClassException(publisher.PublisherBase):
def publish_samples(self, ctxt, samples):
raise Exception()
def publish_events(self, ctxt, events):
raise Exception()
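    # Fake transformer used throughout these tests: it records every sample
    # it handles on the class and re-emits the sample with ``append_name``
    # (default "_update") appended to its name.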
class TransformerClass(transformer.TransformerBase):
samples = []
grouping_keys = ['counter_name']
def __init__(self, append_name='_update'):
self.__class__.samples = []
self.append_name = append_name
def flush(self, ctxt):
return []
def handle_sample(self, ctxt, counter):
self.__class__.samples.append(counter)
newname = getattr(counter, 'name') + self.append_name
return sample.Sample(
name=newname,
type=counter.type,
volume=counter.volume,
unit=counter.unit,
user_id=counter.user_id,
project_id=counter.project_id,
resource_id=counter.resource_id,
timestamp=counter.timestamp,
resource_metadata=counter.resource_metadata,
)
class TransformerClassDrop(transformer.TransformerBase):
samples = []
grouping_keys = ['resource_id']
def __init__(self):
self.__class__.samples = []
def handle_sample(self, ctxt, counter):
self.__class__.samples.append(counter)
class TransformerClassException(object):
grouping_keys = ['resource_id']
@staticmethod
def handle_sample(ctxt, counter):
raise Exception()
def setUp(self):
super(BasePipelineTestCase, self).setUp()
self.test_counter = sample.Sample(
name='a',
type=sample.TYPE_GAUGE,
volume=1,
unit='B',
user_id="test_user",
project_id="test_proj",
resource_id="test_resource",
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
)
self.useFixture(mockpatch.PatchObject(
publisher, 'get_publisher', side_effect=self.get_publisher))
self.transformer_manager = mock.MagicMock()
self.transformer_manager.__getitem__.side_effect = \
self.fake_tem_get_ext
self._setup_pipeline_cfg()
self._reraise_exception = True
self.useFixture(mockpatch.Patch(
'ceilometer.pipeline.LOG.exception',
side_effect=self._handle_reraise_exception))
def _handle_reraise_exception(self, msg):
if self._reraise_exception:
raise Exception(traceback.format_exc())
@abc.abstractmethod
def _setup_pipeline_cfg(self):
"""Setup the appropriate form of pipeline config."""
@abc.abstractmethod
def _augment_pipeline_cfg(self):
"""Augment the pipeline config with an additional element."""
@abc.abstractmethod
def _break_pipeline_cfg(self):
"""Break the pipeline config with a malformed element."""
@abc.abstractmethod
def _dup_pipeline_name_cfg(self):
"""Break the pipeline config with duplicate pipeline name."""
@abc.abstractmethod
def _set_pipeline_cfg(self, field, value):
"""Set a field to a value in the pipeline config."""
@abc.abstractmethod
def _extend_pipeline_cfg(self, field, value):
"""Extend an existing field in the pipeline config with a value."""
@abc.abstractmethod
def _unset_pipeline_cfg(self, field):
"""Clear an existing field in the pipeline config."""
def _exception_create_pipelinemanager(self):
self.assertRaises(pipeline.PipelineException,
pipeline.PipelineManager,
self.pipeline_cfg,
self.transformer_manager)
def test_no_counters(self):
self._unset_pipeline_cfg('counters')
self._exception_create_pipelinemanager()
def test_no_transformers(self):
self._unset_pipeline_cfg('transformers')
pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager)
def test_no_name(self):
self._unset_pipeline_cfg('name')
self._exception_create_pipelinemanager()
def test_no_interval(self):
self._unset_pipeline_cfg('interval')
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
self.assertEqual(600, pipe.get_interval())
def test_no_publishers(self):
self._unset_pipeline_cfg('publishers')
self._exception_create_pipelinemanager()
def test_invalid_resources(self):
invalid_resource = {'invalid': 1}
self._set_pipeline_cfg('resources', invalid_resource)
self._exception_create_pipelinemanager()
def test_check_counters_include_exclude_same(self):
counter_cfg = ['a', '!a']
self._set_pipeline_cfg('counters', counter_cfg)
self._exception_create_pipelinemanager()
def test_check_counters_include_exclude(self):
counter_cfg = ['a', '!b']
self._set_pipeline_cfg('counters', counter_cfg)
self._exception_create_pipelinemanager()
def test_check_counters_wildcard_included(self):
counter_cfg = ['a', '*']
self._set_pipeline_cfg('counters', counter_cfg)
self._exception_create_pipelinemanager()
def test_check_publishers_invalid_publisher(self):
publisher_cfg = ['test_invalid']
self._set_pipeline_cfg('publishers', publisher_cfg)
def test_invalid_string_interval(self):
self._set_pipeline_cfg('interval', 'string')
self._exception_create_pipelinemanager()
def test_check_transformer_invalid_transformer(self):
transformer_cfg = [
{'name': "test_invalid",
'parameters': {}}
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._exception_create_pipelinemanager()
def test_get_interval(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
self.assertEqual(5, pipe.get_interval())
def test_publisher_transformer_invoked(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, len(self.TransformerClass.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], "name"))
def test_multiple_included_counters(self):
counter_cfg = ['a', 'b']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertEqual(2, len(publisher.samples))
self.assertEqual(2, len(self.TransformerClass.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
self.assertEqual('b_update', getattr(publisher.samples[1], "name"))
@mock.patch('ceilometer.pipeline.LOG')
def test_none_volume_counter(self, LOG):
self._set_pipeline_cfg('counters', ['empty_volume'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
publisher = pipeline_manager.pipelines[0].publishers[0]
test_s = sample.Sample(
name='empty_volume',
type=self.test_counter.type,
volume=None,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([test_s])
LOG.warning.assert_called_once_with(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has no volume (volume: %(counter_volume)s), the '
'sample will be dropped'
% {'counter_name': test_s.name,
'resource_id': test_s.resource_id,
'timestamp': test_s.timestamp,
'counter_volume': test_s.volume})
self.assertEqual(0, len(publisher.samples))
@mock.patch('ceilometer.pipeline.LOG')
def test_fake_volume_counter(self, LOG):
self._set_pipeline_cfg('counters', ['fake_volume'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
publisher = pipeline_manager.pipelines[0].publishers[0]
test_s = sample.Sample(
name='fake_volume',
type=self.test_counter.type,
volume='fake_value',
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([test_s])
LOG.warning.assert_called_once_with(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has volume which is not a number '
'(volume: %(counter_volume)s), the sample will be dropped'
% {'counter_name': test_s.name,
'resource_id': test_s.resource_id,
'timestamp': test_s.timestamp,
'counter_volume': test_s.volume})
self.assertEqual(0, len(publisher.samples))
def test_counter_dont_match(self):
counter_cfg = ['nomatch']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
self.assertEqual(0, publisher.calls)
def test_wildcard_counter(self):
counter_cfg = ['*']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, len(self.TransformerClass.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
def test_wildcard_excluded_counters(self):
counter_cfg = ['*', '!a']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].support_meter('a'))
def test_wildcard_excluded_counters_not_excluded(self):
counter_cfg = ['*', '!b']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, len(self.TransformerClass.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
def test_all_excluded_counters_not_excluded(self):
counter_cfg = ['!b', '!c']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, len(self.TransformerClass.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], "name"))
def test_all_excluded_counters_is_excluded(self):
counter_cfg = ['!a', '!c']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].support_meter('a'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('b'))
self.assertFalse(pipeline_manager.pipelines[0].support_meter('c'))
def test_wildcard_and_excluded_wildcard_counters(self):
counter_cfg = ['*', '!disk.*']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))
def test_included_counter_and_wildcard_counters(self):
counter_cfg = ['cpu', 'disk.*']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertTrue(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu'))
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('instance'))
def test_excluded_counter_and_excluded_wildcard_counters(self):
counter_cfg = ['!cpu', '!disk.*']
self._set_pipeline_cfg('counters', counter_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertFalse(pipeline_manager.pipelines[0].
support_meter('disk.read.bytes'))
self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu'))
self.assertTrue(pipeline_manager.pipelines[0].
support_meter('instance'))
def test_multiple_pipeline(self):
self._augment_pipeline_cfg()
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, publisher.calls)
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
new_publisher = pipeline_manager.pipelines[1].publishers[0]
self.assertEqual(1, len(new_publisher.samples))
self.assertEqual(1, new_publisher.calls)
self.assertEqual('b_new', getattr(new_publisher.samples[0], "name"))
self.assertEqual(2, len(self.TransformerClass.samples))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], "name"))
self.assertEqual('b',
getattr(self.TransformerClass.samples[1], "name"))
def test_multiple_pipeline_exception(self):
self._reraise_exception = False
self._break_pipeline_cfg()
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.test_counter = sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, publisher.calls)
self.assertEqual(1, len(publisher.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], "name"))
self.assertEqual(2, len(self.TransformerClass.samples))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], "name"))
self.assertEqual('b',
getattr(self.TransformerClass.samples[1], "name"))
def test_none_transformer_pipeline(self):
self._set_pipeline_cfg('transformers', None)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, publisher.calls)
self.assertEqual('a', getattr(publisher.samples[0], 'name'))
def test_empty_transformer_pipeline(self):
self._set_pipeline_cfg('transformers', [])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, publisher.calls)
self.assertEqual('a', getattr(publisher.samples[0], 'name'))
def test_multiple_transformer_same_class(self):
transformer_cfg = [
{
'name': 'update',
'parameters': {}
},
{
'name': 'update',
'parameters': {}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, publisher.calls)
self.assertEqual(1, len(publisher.samples))
self.assertEqual('a_update_update',
getattr(publisher.samples[0], 'name'))
self.assertEqual(2, len(self.TransformerClass.samples))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], 'name'))
self.assertEqual('a_update',
getattr(self.TransformerClass.samples[1], 'name'))
def test_multiple_transformer_same_class_different_parameter(self):
transformer_cfg = [
{
'name': 'update',
'parameters':
{
"append_name": "_update",
}
},
{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertEqual(2, len(self.TransformerClass.samples))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], 'name'))
self.assertEqual('a_update',
getattr(self.TransformerClass.samples[1], 'name'))
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1,
len(publisher.samples))
self.assertEqual('a_update_new',
getattr(publisher.samples[0], 'name'))
def test_multiple_transformer_drop_transformer(self):
transformer_cfg = [
{
'name': 'update',
'parameters':
{
"append_name": "_update",
}
},
{
'name': 'drop',
'parameters': {}
},
{
'name': 'update',
'parameters':
{
"append_name": "_new",
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
self.assertEqual(1, len(self.TransformerClass.samples))
self.assertEqual('a',
getattr(self.TransformerClass.samples[0], 'name'))
self.assertEqual(1,
len(self.TransformerClassDrop.samples))
self.assertEqual('a_update',
getattr(self.TransformerClassDrop.samples[0], 'name'))
def test_multiple_publisher(self):
self._set_pipeline_cfg('publishers', ['test://', 'new://'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
new_publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1, len(new_publisher.samples))
self.assertEqual('a_update',
getattr(new_publisher.samples[0], 'name'))
self.assertEqual('a_update',
getattr(publisher.samples[0], 'name'))
def test_multiple_publisher_isolation(self):
self._reraise_exception = False
self._set_pipeline_cfg('publishers', ['except://', 'new://'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
new_publisher = pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(1, len(new_publisher.samples))
self.assertEqual('a_update',
getattr(new_publisher.samples[0], 'name'))
def test_multiple_counter_pipeline(self):
self._set_pipeline_cfg('counters', ['a', 'b'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter,
sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(2, len(publisher.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], 'name'))
self.assertEqual('b_update', getattr(publisher.samples[1], 'name'))
def test_flush_pipeline_cache(self):
CACHE_SIZE = 10
extra_transformer_cfg = [
{
'name': 'cache',
'parameters': {
'size': CACHE_SIZE,
}
},
{
'name': 'update',
'parameters':
{
'append_name': '_new'
}
},
]
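        # The cache (accumulator) transformer buffers samples and only hands
        # them to the next transformer once CACHE_SIZE samples have been
        # collected, which is why the intermediate flushes below publish
        # nothing.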
self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, self.test_counter)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
pipe.flush(None)
self.assertEqual(0, len(publisher.samples))
pipe.publish_data(None, self.test_counter)
pipe.flush(None)
self.assertEqual(0, len(publisher.samples))
for i in range(CACHE_SIZE - 2):
pipe.publish_data(None, self.test_counter)
pipe.flush(None)
self.assertEqual(CACHE_SIZE, len(publisher.samples))
self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name'))
def test_flush_pipeline_cache_multiple_counter(self):
CACHE_SIZE = 3
extra_transformer_cfg = [
{
'name': 'cache',
'parameters': {
'size': CACHE_SIZE
}
},
{
'name': 'update',
'parameters':
{
'append_name': '_new'
}
},
]
self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
self._set_pipeline_cfg('counters', ['a', 'b'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter,
sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
with pipeline_manager.publisher(None) as p:
p([self.test_counter])
self.assertEqual(CACHE_SIZE, len(publisher.samples))
self.assertEqual('a_update_new',
getattr(publisher.samples[0], 'name'))
self.assertEqual('b_update_new',
getattr(publisher.samples[1], 'name'))
def test_flush_pipeline_cache_before_publisher(self):
extra_transformer_cfg = [{
'name': 'cache',
'parameters': {}
}]
self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
publisher = pipe.publishers[0]
pipe.publish_data(None, self.test_counter)
self.assertEqual(0, len(publisher.samples))
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
self.assertEqual('a_update',
getattr(publisher.samples[0], 'name'))
def test_global_unit_conversion(self):
scale = 'volume / ((10**6) * 60)'
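        # with the 1200000000 ns sample below: 1200000000 / ((10**6) * 60)
        # == 20, the cpu_mins volume asserted at the end of the test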
transformer_cfg = [
{
'name': 'unit_conversion',
'parameters': {
'source': {},
'target': {'name': 'cpu_mins',
'unit': 'min',
'scale': scale},
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['cpu'])
counters = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=1200000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
cpu_mins = publisher.samples[-1]
self.assertEqual('cpu_mins', getattr(cpu_mins, 'name'))
self.assertEqual('min', getattr(cpu_mins, 'unit'))
self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type'))
self.assertEqual(20, getattr(cpu_mins, 'volume'))
def test_unit_identified_source_unit_conversion(self):
transformer_cfg = [
{
'name': 'unit_conversion',
'parameters': {
'source': {'unit': '°C'},
'target': {'unit': '°F',
'scale': '(volume * 1.8) + 32'},
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['core_temperature',
'ambient_temperature'])
counters = [
sample.Sample(
name='core_temperature',
type=sample.TYPE_GAUGE,
volume=36.0,
unit='°C',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
sample.Sample(
name='ambient_temperature',
type=sample.TYPE_GAUGE,
volume=88.8,
unit='°F',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(2, len(publisher.samples))
core_temp = publisher.samples[0]
self.assertEqual('core_temperature', getattr(core_temp, 'name'))
self.assertEqual('°F', getattr(core_temp, 'unit'))
self.assertEqual(96.8, getattr(core_temp, 'volume'))
amb_temp = publisher.samples[1]
self.assertEqual('ambient_temperature', getattr(amb_temp, 'name'))
self.assertEqual('°F', getattr(amb_temp, 'unit'))
self.assertEqual(88.8, getattr(amb_temp, 'volume'))
self.assertEqual(96.8, getattr(core_temp, 'volume'))
def _do_test_rate_of_change_conversion(self, prev, curr, type, expected,
offset=1, weight=None):
s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)"
"* (resource_metadata.non.existent or 1.0)"
"* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))")
transformer_cfg = [
{
'name': 'rate_of_change',
'parameters': {
'source': {},
'target': {'name': 'cpu_util',
'unit': '%',
'type': sample.TYPE_GAUGE,
'scale': s},
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['cpu'])
now = timeutils.utcnow()
later = now + datetime.timedelta(minutes=offset)
um = {'autoscaling_weight': weight} if weight else {}
counters = [
sample.Sample(
name='cpu',
type=type,
volume=prev,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 4,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=prev,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource2',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 2,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=curr,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=later.isoformat(),
resource_metadata={'cpu_number': 4,
'user_metadata': um},
),
sample.Sample(
name='cpu',
type=type,
volume=curr,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource2',
timestamp=later.isoformat(),
resource_metadata={'cpu_number': 2,
'user_metadata': um},
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(2, len(publisher.samples))
pipe.flush(None)
self.assertEqual(2, len(publisher.samples))
cpu_util = publisher.samples[0]
self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
self.assertEqual('test_resource', getattr(cpu_util, 'resource_id'))
self.assertEqual('%', getattr(cpu_util, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
self.assertEqual(expected, getattr(cpu_util, 'volume'))
cpu_util = publisher.samples[1]
self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id'))
self.assertEqual('%', getattr(cpu_util, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
self.assertEqual(expected * 2, getattr(cpu_util, 'volume'))
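    # Expected value used by test_rate_of_change_conversion below:
    # (180e9 - 120e9) ns over a 1 minute window on a 4-CPU resource is
    # 60e9 / (60 * 10**9 * 4) * 100 == 25.0 (%), doubled for the 2-CPU
    # resource.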
def test_rate_of_change_conversion(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
25.0)
def test_rate_of_change_conversion_weight(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
27.5,
weight=1.1)
def test_rate_of_change_conversion_negative_cumulative_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_CUMULATIVE,
50.0)
def test_rate_of_change_conversion_negative_gauge_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_GAUGE,
-25.0)
def test_rate_of_change_conversion_zero_delay(self):
self._do_test_rate_of_change_conversion(120000000000,
120000000000,
sample.TYPE_CUMULATIVE,
0.0,
offset=0)
def test_rate_of_change_no_predecessor(self):
s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
transformer_cfg = [
{
'name': 'rate_of_change',
'parameters': {
'source': {},
'target': {'name': 'cpu_util',
'unit': '%',
'type': sample.TYPE_GAUGE,
'scale': s}
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['cpu'])
now = timeutils.utcnow()
counters = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=120000000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 4}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
pipe.flush(None)
self.assertEqual(0, len(publisher.samples))
@mock.patch('ceilometer.transformer.conversions.LOG')
def test_rate_of_change_out_of_order(self, the_log):
s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
transformer_cfg = [
{
'name': 'rate_of_change',
'parameters': {
'source': {},
'target': {'name': 'cpu_util',
'unit': '%',
'type': sample.TYPE_GAUGE,
'scale': s}
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['cpu'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
now = timeutils.utcnow()
earlier = now - datetime.timedelta(seconds=10)
later = now + datetime.timedelta(seconds=10)
counters = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=125000000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=now.isoformat(),
resource_metadata={'cpu_number': 4}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=120000000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=earlier.isoformat(),
resource_metadata={'cpu_number': 4}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=130000000000,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=later.isoformat(),
resource_metadata={'cpu_number': 4}
),
]
pipe.publish_data(None, counters)
publisher = pipe.publishers[0]
self.assertEqual(1, len(publisher.samples))
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
cpu_util_sample = publisher.samples[0]
self.assertEqual(12.5, cpu_util_sample.volume)
the_log.warning.assert_called_with(
'dropping out of time order sample: %s',
(counters[1],)
)
def test_resources(self):
resources = ['test1://', 'test2://']
self._set_pipeline_cfg('resources', resources)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(resources,
pipeline_manager.pipelines[0].resources)
def test_no_resources(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(0, len(pipeline_manager.pipelines[0].resources))
def _do_test_rate_of_change_mapping(self, pipe, meters, units):
now = timeutils.utcnow()
base = 1000
offset = 7
rate = 42
later = now + datetime.timedelta(minutes=offset)
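        # the second sample grows by offset * 60 * rate over offset minutes,
        # so the per-second rate asserted below is exactly `rate`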
counters = []
for v, ts in [(base, now.isoformat()),
(base + (offset * 60 * rate), later.isoformat())]:
for n, u, r in [(meters[0], units[0], 'resource1'),
(meters[1], units[1], 'resource2')]:
s = sample.Sample(
name=n,
type=sample.TYPE_CUMULATIVE,
volume=v,
unit=u,
user_id='test_user',
project_id='test_proj',
resource_id=r,
timestamp=ts,
resource_metadata={},
)
counters.append(s)
pipe.publish_data(None, counters)
publisher = pipe.publishers[0]
self.assertEqual(2, len(publisher.samples))
pipe.flush(None)
self.assertEqual(2, len(publisher.samples))
bps = publisher.samples[0]
self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name'))
self.assertEqual('resource1', getattr(bps, 'resource_id'))
self.assertEqual('%s/s' % units[0], getattr(bps, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type'))
self.assertEqual(rate, getattr(bps, 'volume'))
rps = publisher.samples[1]
self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name'))
self.assertEqual('resource2', getattr(rps, 'resource_id'))
self.assertEqual('%s/s' % units[1], getattr(rps, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type'))
self.assertEqual(rate, getattr(rps, 'volume'))
def test_rate_of_change_mapping(self):
map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)',
'unit': '(B|request)'}
map_to = {'name': 'disk.\\1.\\2.rate',
'unit': '\\1/s'}
transformer_cfg = [
{
'name': 'rate_of_change',
'parameters': {
'source': {
'map_from': map_from
},
'target': {
'map_to': map_to,
'type': sample.TYPE_GAUGE
},
},
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['disk.read.bytes',
'disk.write.requests'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
meters = ('disk.read.bytes', 'disk.write.requests')
units = ('B', 'request')
self._do_test_rate_of_change_mapping(pipe, meters, units)
def _do_test_aggregator(self, parameters, expected_length):
transformer_cfg = [
{
'name': 'aggregator',
'parameters': parameters,
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
counters = [
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=26,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=16,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=53,
unit='B',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=42,
unit='B',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=15,
unit='B',
user_id='test_user',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=2,
unit='B',
user_id='test_user_bis',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '3.0'}
),
]
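        # The six samples above sum to 26 + 16 + 53 + 42 + 15 + 2 == 154;
        # grouped by (user_id, project_id) the sub-totals are 42, 95, 15
        # and 2, which is what the aggregator tests driven by this helper
        # assert for the various 'first'/'last' settings.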
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(expected_length, len(publisher.samples))
return sorted(publisher.samples, key=lambda s: s.volume)
def test_aggregator_meter_type(self):
volumes = [1.0, 2.0, 3.0]
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': len(volumes) * len(sample.TYPES)}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters',
['testgauge', 'testcumulative', 'testdelta'])
counters = []
for sample_type in sample.TYPES:
for volume in volumes:
counters.append(sample.Sample(
name='test' + sample_type,
type=sample_type,
volume=volume,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
))
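        # sorted expectation below: the three delta samples are summed
        # (1 + 2 + 3 == 6.0) while the gauge and cumulative samples each
        # collapse to a single value (2.0 and 3.0, presumably the mean and
        # the last value respectively)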
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
actual = sorted(s.volume for s in publisher.samples)
self.assertEqual([2.0, 3.0, 6.0], actual)
def test_aggregator_metadata(self):
for conf, expected_version in [('last', '2.0'), ('first', '1.0')]:
samples = self._do_test_aggregator({
'resource_metadata': conf,
'target': {'name': 'aggregated-bytes'}
}, expected_length=4)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(2, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
s = samples[1]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(15, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj_bis', s.project_id)
self.assertEqual({'version': '2.0'},
s.resource_metadata)
s = samples[2]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(42, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': expected_version},
s.resource_metadata)
s = samples[3]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(95, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj_bis', s.project_id)
self.assertEqual({'version': expected_version},
s.resource_metadata)
def test_aggregator_user_last_and_metadata_last(self):
samples = self._do_test_aggregator({
'resource_metadata': 'last',
'user_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=2)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(44, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
s = samples[1]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(110, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj_bis', s.project_id)
self.assertEqual({'version': '2.0'},
s.resource_metadata)
def test_aggregator_user_first_and_metadata_last(self):
samples = self._do_test_aggregator({
'resource_metadata': 'last',
'user_id': 'first',
'target': {'name': 'aggregated-bytes'}
}, expected_length=2)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(44, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
s = samples[1]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(110, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj_bis', s.project_id)
self.assertEqual({'version': '2.0'},
s.resource_metadata)
def test_aggregator_all_first(self):
samples = self._do_test_aggregator({
'resource_metadata': 'first',
'user_id': 'first',
'project_id': 'first',
'target': {'name': 'aggregated-bytes'}
}, expected_length=1)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(154, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '1.0'},
s.resource_metadata)
def test_aggregator_all_last(self):
samples = self._do_test_aggregator({
'resource_metadata': 'last',
'user_id': 'last',
'project_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=1)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(154, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
def test_aggregator_all_mixed(self):
samples = self._do_test_aggregator({
'resource_metadata': 'drop',
'user_id': 'first',
'project_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=1)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(154, s.volume)
self.assertEqual('test_user', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({}, s.resource_metadata)
def test_aggregator_metadata_default(self):
samples = self._do_test_aggregator({
'user_id': 'last',
'project_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=1)
s = samples[0]
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(154, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
@mock.patch('ceilometer.transformer.conversions.LOG')
def test_aggregator_metadata_invalid(self, mylog):
samples = self._do_test_aggregator({
'resource_metadata': 'invalid',
'user_id': 'last',
'project_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=1)
s = samples[0]
self.assertTrue(mylog.warning.called)
self.assertEqual('aggregated-bytes', s.name)
self.assertEqual(154, s.volume)
self.assertEqual('test_user_bis', s.user_id)
self.assertEqual('test_proj', s.project_id)
self.assertEqual({'version': '3.0'},
s.resource_metadata)
def test_aggregator_sized_flush(self):
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': 2},
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
counters = [
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=26,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=16,
unit='B',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
)
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, [counters[0]])
pipe.flush(None)
publisher = pipe.publishers[0]
self.assertEqual(0, len(publisher.samples))
pipe.publish_data(None, [counters[1]])
pipe.flush(None)
publisher = pipe.publishers[0]
self.assertEqual(2, len(publisher.samples))
def test_aggregator_timed_flush(self):
timeutils.set_time_override()
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': 900, 'retention_time': 60},
},
]
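        # size=900 is never reached; the buffered sample is only released
        # because the fake clock is advanced past the 60 second
        # retention_time further down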
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
counters = [
sample.Sample(
name='storage.objects.incoming.bytes',
type=sample.TYPE_DELTA,
volume=26,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
timeutils.advance_time_seconds(120)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(1, len(publisher.samples))
def test_aggregator_without_authentication(self):
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': 2},
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['storage.objects.outgoing.bytes'])
counters = [
sample.Sample(
name='storage.objects.outgoing.bytes',
type=sample.TYPE_DELTA,
volume=26,
unit='B',
user_id=None,
project_id=None,
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='storage.objects.outgoing.bytes',
type=sample.TYPE_DELTA,
volume=16,
unit='B',
user_id=None,
project_id=None,
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
)
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, [counters[0]])
pipe.flush(None)
publisher = pipe.publishers[0]
self.assertEqual(0, len(publisher.samples))
pipe.publish_data(None, [counters[1]])
pipe.flush(None)
publisher = pipe.publishers[0]
self.assertEqual(1, len(publisher.samples))
self.assertEqual(42, getattr(publisher.samples[0], 'volume'))
self.assertEqual("test_resource", getattr(publisher.samples[0],
'resource_id'))
def test_aggregator_to_rate_of_change_transformer_two_resources(self):
resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4',
'5dd418a6-c6a9-49c9-9cef-b357d72c71dd']
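        # The aggregator is created with timestamp="last", so each flushed
        # bucket should carry the timestamp of its most recent sample -
        # that is what the isotime(test_time) assertions below verify before
        # feeding the aggregated samples into the rate_of_change transformer.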
aggregator = conversions.AggregatorTransformer(size="2",
timestamp="last")
rate_of_change_transformer = conversions.RateOfChangeTransformer()
counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00')
for offset in range(2):
counter = copy.copy(self.test_counter)
counter.timestamp = timeutils.isotime(counter_time)
counter.resource_id = resource_id[0]
counter.volume = offset
counter.type = sample.TYPE_CUMULATIVE
counter.unit = 'ns'
aggregator.handle_sample(context.get_admin_context(), counter)
if offset == 1:
test_time = counter_time
counter_time = counter_time + datetime.timedelta(0, 1)
aggregated_counters = aggregator.flush(context.get_admin_context())
self.assertEqual(len(aggregated_counters), 1)
self.assertEqual(aggregated_counters[0].timestamp,
timeutils.isotime(test_time))
rate_of_change_transformer.handle_sample(context.get_admin_context(),
aggregated_counters[0])
for offset in range(2):
counter = copy.copy(self.test_counter)
counter.timestamp = timeutils.isotime(counter_time)
counter.resource_id = resource_id[offset]
counter.volume = 2
counter.type = sample.TYPE_CUMULATIVE
counter.unit = 'ns'
aggregator.handle_sample(context.get_admin_context(), counter)
if offset == 0:
test_time = counter_time
counter_time = counter_time + datetime.timedelta(0, 1)
aggregated_counters = aggregator.flush(context.get_admin_context())
self.assertEqual(len(aggregated_counters), 2)
for counter in aggregated_counters:
if counter.resource_id == resource_id[0]:
rateOfChange = rate_of_change_transformer.handle_sample(
context.get_admin_context(), counter)
self.assertEqual(counter.timestamp,
timeutils.isotime(test_time))
self.assertEqual(rateOfChange.volume, 1)
def _do_test_arithmetic_expr_parse(self, expr, expected):
actual = arithmetic.ArithmeticTransformer.parse_expr(expr)
self.assertEqual(expected, actual)
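    # The expected tuples below document parse_expr's escaping convention:
    # a bare $(meter) expands to <meter>.volume, and meter names containing
    # dots or reserved words are rewritten to _<name>_ESC identifiers so
    # they can be used as variables in the evaluated expression.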
def test_arithmetic_expr_parse(self):
expr = '$(cpu) + $(cpu.util)'
expected = ('cpu.volume + _cpu_util_ESC.volume',
{
'cpu': 'cpu',
'cpu.util': '_cpu_util_ESC'
})
self._do_test_arithmetic_expr_parse(expr, expected)
def test_arithmetic_expr_parse_parameter(self):
expr = '$(cpu) + $(cpu.util).resource_metadata'
expected = ('cpu.volume + _cpu_util_ESC.resource_metadata',
{
'cpu': 'cpu',
'cpu.util': '_cpu_util_ESC'
})
self._do_test_arithmetic_expr_parse(expr, expected)
def test_arithmetic_expr_parse_reserved_keyword(self):
expr = '$(class) + $(cpu.util)'
expected = ('_class_ESC.volume + _cpu_util_ESC.volume',
{
'class': '_class_ESC',
'cpu.util': '_cpu_util_ESC'
})
self._do_test_arithmetic_expr_parse(expr, expected)
def test_arithmetic_expr_parse_already_escaped(self):
expr = '$(class) + $(_class_ESC)'
expected = ('_class_ESC.volume + __class_ESC_ESC.volume',
{
'class': '_class_ESC',
'_class_ESC': '__class_ESC_ESC'
})
self._do_test_arithmetic_expr_parse(expr, expected)
def _do_test_arithmetic(self, expression, scenario, expected):
transformer_cfg = [
{
'name': 'arithmetic',
'parameters': {
'target': {'name': 'new_meter',
'unit': '%',
'type': sample.TYPE_GAUGE,
'expr': expression},
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters',
list(set(s['name'] for s in scenario)))
counters = []
test_resources = ['test_resource1', 'test_resource2']
for resource_id in test_resources:
for s in scenario:
counters.append(sample.Sample(
name=s['name'],
type=sample.TYPE_CUMULATIVE,
volume=s['volume'],
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id=resource_id,
timestamp=timeutils.utcnow().isoformat(),
resource_metadata=s.get('metadata')
))
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
for s in counters:
pipe.publish_data(None, s)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
expected_len = len(test_resources) * len(expected)
self.assertEqual(expected_len, len(publisher.samples))
# bucket samples by resource first
samples_by_resource = dict((r, []) for r in test_resources)
for s in publisher.samples:
samples_by_resource[s.resource_id].append(s)
for resource_id in samples_by_resource:
self.assertEqual(len(expected),
len(samples_by_resource[resource_id]))
for i, s in enumerate(samples_by_resource[resource_id]):
self.assertEqual('new_meter', getattr(s, 'name'))
self.assertEqual(resource_id, getattr(s, 'resource_id'))
self.assertEqual('%', getattr(s, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type'))
self.assertEqual(expected[i], getattr(s, 'volume'))
def test_arithmetic_transformer(self):
expression = '100.0 * $(memory.usage) / $(memory)'
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
]
expected = [50.0]
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_expr_empty(self):
expression = ''
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
]
expected = []
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_expr_misconfigured(self):
expression = '512.0 * 3'
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
]
expected = []
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_nan(self):
expression = 'float(\'nan\') * $(memory.usage) / $(memory)'
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
]
expected = []
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_exception(self):
expression = '$(memory) / 0'
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
]
expected = []
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_multiple_samples(self):
expression = '100.0 * $(memory.usage) / $(memory)'
scenario = [
dict(name='memory', volume=2048.0),
dict(name='memory.usage', volume=512.0),
dict(name='memory', volume=1024.0),
]
expected = [25.0]
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_missing(self):
expression = '100.0 * $(memory.usage) / $(memory)'
scenario = [dict(name='memory.usage', volume=512.0)]
expected = []
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_more_than_needed(self):
expression = '100.0 * $(memory.usage) / $(memory)'
scenario = [
dict(name='memory', volume=1024.0),
dict(name='memory.usage', volume=512.0),
dict(name='cpu_util', volume=90.0),
]
expected = [50.0]
self._do_test_arithmetic(expression, scenario, expected)
def test_arithmetic_transformer_cache_cleared(self):
transformer_cfg = [
{
'name': 'arithmetic',
'parameters': {
'target': {'name': 'new_meter',
'expr': '$(memory.usage) + 2'}
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['memory.usage'])
counter = sample.Sample(
name='memory.usage',
type=sample.TYPE_GAUGE,
volume=1024.0,
unit='MB',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata=None
)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, [counter])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
self.assertEqual(1026.0, publisher.samples[0].volume)
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
counter.volume = 2048.0
pipe.publish_data(None, [counter])
pipe.flush(None)
self.assertEqual(2, len(publisher.samples))
self.assertEqual(2050.0, publisher.samples[1].volume)
def test_aggregator_timed_flush_no_matching_samples(self):
timeutils.set_time_override()
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': 900, 'retention_time': 60},
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['unrelated-sample'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
timeutils.advance_time_seconds(200)
pipe = pipeline_manager.pipelines[0]
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(publisher.samples))
def _do_test_delta(self, data, expected, growth_only=False):
transformer_cfg = [
{
'name': 'delta',
'parameters': {
'target': {'name': 'new_meter'},
'growth_only': growth_only,
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['cpu'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, data)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(expected, len(publisher.samples))
return publisher.samples
def test_delta_transformer(self):
samples = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=26,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=16,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=53,
unit='ns',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
]
deltas = self._do_test_delta(samples, 2)
self.assertEqual('new_meter', deltas[0].name)
self.assertEqual('delta', deltas[0].type)
self.assertEqual('ns', deltas[0].unit)
self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata)
self.assertEqual(-10, deltas[0].volume)
self.assertEqual('new_meter', deltas[1].name)
self.assertEqual('delta', deltas[1].type)
self.assertEqual('ns', deltas[1].unit)
self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata)
self.assertEqual(37, deltas[1].volume)
def test_delta_transformer_out_of_order(self):
samples = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=26,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=16,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=((timeutils.utcnow() - datetime.timedelta(minutes=5))
.isoformat()),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=53,
unit='ns',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
]
deltas = self._do_test_delta(samples, 1)
self.assertEqual('new_meter', deltas[0].name)
self.assertEqual('delta', deltas[0].type)
self.assertEqual('ns', deltas[0].unit)
self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata)
self.assertEqual(27, deltas[0].volume)
def test_delta_transformer_growth_only(self):
samples = [
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=26,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=16,
unit='ns',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '2.0'}
),
sample.Sample(
name='cpu',
type=sample.TYPE_CUMULATIVE,
volume=53,
unit='ns',
user_id='test_user_bis',
project_id='test_proj_bis',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
),
]
deltas = self._do_test_delta(samples, 1, True)
self.assertEqual('new_meter', deltas[0].name)
self.assertEqual('delta', deltas[0].type)
self.assertEqual('ns', deltas[0].unit)
self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata)
self.assertEqual(37, deltas[0].volume)
def test_unique_pipeline_names(self):
self._dup_pipeline_name_cfg()
self._exception_create_pipelinemanager()
def test_get_pipeline_grouping_key(self):
transformer_cfg = [
{
'name': 'update',
'parameters': {}
},
{
'name': 'unit_conversion',
'parameters': {
'source': {},
'target': {'name': 'cpu_mins',
'unit': 'min',
'scale': 'volume'},
}
},
{
'name': 'update',
'parameters': {}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(set(['resource_id', 'counter_name']),
set(pipeline.get_pipeline_grouping_key(
pipeline_manager.pipelines[0])))
def test_get_pipeline_duplicate_grouping_key(self):
transformer_cfg = [
{
'name': 'update',
'parameters': {}
},
{
'name': 'update',
'parameters': {}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(['counter_name'],
pipeline.get_pipeline_grouping_key(
pipeline_manager.pipelines[0]))
|
{
"content_hash": "4f17af92d78cc400ddefe7018c2c6066",
"timestamp": "",
"source": "github",
"line_count": 2139,
"max_line_length": 79,
"avg_line_length": 40.137447405329596,
"alnum_prop": 0.5269876767535584,
"repo_name": "idegtiarov/ceilometer",
"id": "867fff51b6755bfc8207f858b8a2be0e96785065",
"size": "86562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/pipeline_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2506039"
},
{
"name": "Shell",
"bytes": "33383"
}
],
"symlink_target": ""
}
|
import collections
import re
import compiler
import tq_types
import typed_ast
class TypeContext(collections.namedtuple(
'TypeContext', ['columns', 'aliases', 'ambig_aliases',
'implicit_column_context', 'aggregate_context'])):
"""Defines the set of valid fields in a point in code, and their types.
Type contexts maintain the order of their fields, which isn't needed for
typical evaluation, but is useful in a few cases, such as SELECT * and when
determining the final names to use for a query result.
Fields:
columns: An OrderedDict mapping from (table name, column name) to type.
aliases: A dict mapping any allowed aliases to their (table, column)
pair. For example, the "value" column on a table "table" has full
name "table.value" but the alias "value" also refers to it (as long
as there are no other tables with a column named "value").
ambig_aliases: A set of aliases that cannot be used because they are
ambiguous.
implicit_column_context: If present, a set of columns that are allowed
to be accessed, but aren't part of the "regular" context. For
example, if the expression "value + 1" is used in a subquery, the
outer query can use "value".
aggregate_context: Either None, indicating that aggregates are not
allowed, or a TypeContext to use if we enter into an aggregate.
"""
@classmethod
def from_table_and_columns(cls, table_name, columns_without_table,
implicit_column_context=None,
aggregate_context=None):
return cls.from_full_columns(
collections.OrderedDict(
((table_name, column_name), col_type)
for column_name, col_type
in columns_without_table.iteritems()),
implicit_column_context, aggregate_context)
@staticmethod
def assert_type(value, expected_type):
assert isinstance(value, expected_type), (
'Expected %s to have type %s, but was %s.' % (
value, expected_type, type(value)))
@classmethod
def from_full_columns(cls, full_columns, implicit_column_context=None,
aggregate_context=None):
"""Given just the columns field, fill in alias information."""
for (table_name, col_name), col_type in full_columns.iteritems():
if table_name is not None:
cls.assert_type(table_name, basestring)
cls.assert_type(col_name, basestring)
cls.assert_type(col_type, tq_types.TYPE_TYPE)
aliases = {}
ambig_aliases = set()
for table_name, column_name in full_columns:
if column_name in ambig_aliases:
continue
elif column_name in aliases:
del aliases[column_name]
ambig_aliases.add(column_name)
else:
aliases[column_name] = (table_name, column_name)
return cls(full_columns, aliases, ambig_aliases,
implicit_column_context, aggregate_context)
@classmethod
def union_contexts(cls, contexts):
"""Creates a type context from the union of others.
This follows the semantics of the comma operator:
-Columns are added in order, and columns already added from previous
tables are kept in their original place.
-All fully-qualified names are removed; columns can only be referenced
by their direct names.
TODO: Do better error handling with things like conflicting types.
"""
result_columns = collections.OrderedDict()
for context in contexts:
assert context.aggregate_context is None
for (_, column_name), col_type in context.columns.iteritems():
full_column = (None, column_name)
if full_column in result_columns:
if result_columns[full_column] == col_type:
continue
raise compiler.CompileError(
'Incompatible types when performing union on field '
'{}: {} vs. {}'.format(full_column,
result_columns[full_column],
col_type))
else:
result_columns[full_column] = col_type
return cls.from_full_columns(result_columns)
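    # A minimal sketch of the comma-operator semantics described in the
    # docstring above (hypothetical values, not part of the original code):
    #
    #   c1 = TypeContext.from_full_columns(
    #       collections.OrderedDict([(('t1', 'a'), tq_types.INT)]))
    #   c2 = TypeContext.from_full_columns(
    #       collections.OrderedDict([(('t2', 'a'), tq_types.INT)]))
    #   merged = TypeContext.union_contexts([c1, c2])
    #   merged.columns.keys()  # [(None, 'a')] -- qualified names are dropped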
@classmethod
def join_contexts(cls, contexts):
result_columns = collections.OrderedDict()
for context in contexts:
result_columns.update(context.columns)
return cls.from_full_columns(result_columns)
def column_ref_for_name(self, name):
"""Gets the full identifier for a column from any possible alias."""
if name in self.columns:
return typed_ast.ColumnRef(name, self.columns[name])
possible_results = []
# Try all possible ways of splitting a dot-separated string.
        for match in re.finditer(r'\.', name):
left_side = name[:match.start()]
right_side = name[match.end():]
result_type = self.columns.get((left_side, right_side))
if result_type is not None:
possible_results.append(
typed_ast.ColumnRef(left_side, right_side, result_type))
if name in self.aliases:
table, column = self.aliases[name]
result_type = self.columns[(table, column)]
possible_results.append(
typed_ast.ColumnRef(table, column, result_type))
if len(possible_results) == 1:
return possible_results[0]
elif len(possible_results) > 1:
raise compiler.CompileError('Ambiguous field: {}'.format(name))
else:
if self.implicit_column_context is not None:
return self.implicit_column_context.column_ref_for_name(name)
else:
raise compiler.CompileError('Field not found: {}'.format(name))
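    # Resolution sketch (hypothetical context, not part of the original code):
    # with columns {('logs', 'value'): tq_types.INT}, both calls below yield a
    # typed_ast.ColumnRef('logs', 'value', tq_types.INT):
    #
    #   ctx.column_ref_for_name('logs.value')  # via the dot-splitting loop
    #   ctx.column_ref_for_name('value')       # via the alias table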
def context_with_subquery_alias(self, subquery_alias):
"""Handle the case where a subquery has an alias.
In this case, it looks like the right approach is to only assign the
alias to the implicit column context, not the full context.
"""
if self.implicit_column_context is None:
return self
new_implicit_column_context = TypeContext.from_full_columns(
collections.OrderedDict(
((subquery_alias, col_name), col_type)
for (_, col_name), col_type
in self.implicit_column_context.columns.iteritems()
)
)
return TypeContext(self.columns, self.aliases, self.ambig_aliases,
new_implicit_column_context, self.aggregate_context)
def context_with_full_alias(self, alias):
assert self.aggregate_context is None
new_columns = collections.OrderedDict(
((alias, col_name), col_type)
for (_, col_name), col_type in self.columns.iteritems()
)
if self.implicit_column_context:
new_implicit_column_context = (
self.implicit_column_context.context_with_full_alias(alias))
else:
new_implicit_column_context = None
return TypeContext.from_full_columns(new_columns,
new_implicit_column_context)
|
{
"content_hash": "dee80939748d47b5a6c208bd94171cb0",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 79,
"avg_line_length": 44.35672514619883,
"alnum_prop": 0.5934080421885299,
"repo_name": "burnhamup/tinyquery",
"id": "68efad5f7c2ce8a0501f3783b314995759a49174",
"size": "7585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyquery/type_context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209784"
}
],
"symlink_target": ""
}
|
from ..interfaces.burpui_api_translate import TranslateBurpuiAPI
from ..dummy.burpui_dummy_api import Clients
class BUIClients:
""""
Get data from burp ui clients
"""
def __init__(self):
"""
"""
# Define clients from Interface
self.clientsobj = Clients
# Get the list of clients from the Interface
self.clients = self.clientsobj.get_clients()
def translate_clients_stats(self):
"""
        :return: list of translated client reports
"""
# Set clients list from api interface to TranslateBurpuiAPI object
clients_list_api = TranslateBurpuiAPI(clients=self.clients)
# Translate with method translate_clients()
clients_reports = clients_list_api.translate_clients()
return clients_reports
|
{
"content_hash": "562f6bef8eb4b7f85468a6028e877c57",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 25.93548387096774,
"alnum_prop": 0.6405472636815921,
"repo_name": "pablodav/burp_server_reports",
"id": "33b55283aa5738c91be1a826afc864cf785794bd",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burp_reports/dummy/burpui_api_translate_dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "810"
},
{
"name": "Makefile",
"bytes": "613"
},
{
"name": "Python",
"bytes": "107809"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
}
|
try:
from .currentenv import *
except ImportError:
# going to assume that we are in prod and make use of prod settings
    from .prod import *
|
{
"content_hash": "b5f29ef4689de3e52d9534614e1aed9a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 71,
"avg_line_length": 30,
"alnum_prop": 0.7066666666666667,
"repo_name": "reinbach/tutorus",
"id": "341eed37ced299beeeef10f86fd7f40fc25a9ee8",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorus/settings/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "160397"
},
{
"name": "Python",
"bytes": "168905"
}
],
"symlink_target": ""
}
|
import rq_worker
if __name__ == '__main__':
rq_worker.start_worker("github_zip")
|
{
"content_hash": "40ab38b8043a5f8baea3b390f11114f8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 40,
"avg_line_length": 21.5,
"alnum_prop": 0.6046511627906976,
"repo_name": "total-impact/depsy",
"id": "44d909fb3d1d03f73654de3f9635745756c30c73",
"size": "86",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "github_zip_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77434"
},
{
"name": "HTML",
"bytes": "57842"
},
{
"name": "JavaScript",
"bytes": "191406"
},
{
"name": "PLSQL",
"bytes": "613"
},
{
"name": "Python",
"bytes": "262098"
},
{
"name": "Shell",
"bytes": "4934"
}
],
"symlink_target": ""
}
|
from ._UniqueID import *
|
{
"content_hash": "61cd18beadb8a8c8fa879d32704e3107",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.72,
"repo_name": "superdyzio/PWR-Stuff",
"id": "c2c2533bd56e7403c1485a795038f6da7bc9925c",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIR-ARR/Projekt Zespołowy/catkin_ws/devel/lib/python2.7/dist-packages/uuid_msgs/msg/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "17829"
},
{
"name": "Batchfile",
"bytes": "1042"
},
{
"name": "C",
"bytes": "2403055"
},
{
"name": "C#",
"bytes": "625528"
},
{
"name": "C++",
"bytes": "3066245"
},
{
"name": "CMake",
"bytes": "983251"
},
{
"name": "CSS",
"bytes": "218848"
},
{
"name": "Common Lisp",
"bytes": "378578"
},
{
"name": "HTML",
"bytes": "4999679"
},
{
"name": "Java",
"bytes": "475300"
},
{
"name": "JavaScript",
"bytes": "266296"
},
{
"name": "M",
"bytes": "2385"
},
{
"name": "M4",
"bytes": "3010"
},
{
"name": "Makefile",
"bytes": "3734730"
},
{
"name": "Matlab",
"bytes": "160418"
},
{
"name": "OCaml",
"bytes": "2021"
},
{
"name": "PHP",
"bytes": "10629"
},
{
"name": "Perl",
"bytes": "7551"
},
{
"name": "PowerShell",
"bytes": "31323"
},
{
"name": "Python",
"bytes": "607184"
},
{
"name": "QMake",
"bytes": "1211"
},
{
"name": "Scala",
"bytes": "4781"
},
{
"name": "Shell",
"bytes": "1550640"
},
{
"name": "Tcl",
"bytes": "4143"
},
{
"name": "q",
"bytes": "1050"
}
],
"symlink_target": ""
}
|
from arcpy import GetParameterAsText
from esri2open import writeFile, prepareGeoJSON, closeJSON
# compute the parameters
features = GetParameterAsText(0).split(";")
outJSON=GetParameterAsText(1)
includeGeometry = "geojson"
fileType = "geojson"
out=prepareGeoJSON(outJSON)
first = True  # this makes sure we aren't missing commas
for feature in features:
if feature[0] in ("'",'"'):
feature = feature[1:-1]
writeFile(out,feature,fileType,includeGeometry, first)
first=False
closeJSON(out)
|
{
"content_hash": "8c737cf0339f97c2492275b68fd3cce0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 34.6,
"alnum_prop": 0.7418111753371869,
"repo_name": "opendata/esri2open",
"id": "27365d05bedfa7acd6dfe46100b2ffcc6c7b5755",
"size": "519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Install/merge.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# perform the install
setup(
name='girder-audit-logs',
version='0.2.0a1',
description='Keeps detailed logs of every REST request and low-level file download event.',
author='Kitware, Inc.',
author_email='kitware@kitware.com',
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
],
packages=find_packages(),
zip_safe=False,
install_requires=['girder>=3.0.0a1'],
entry_points={
'girder.plugin': [
'audit_logs = girder_audit_logs:AuditLogsPlugin'
]
}
)
|
{
"content_hash": "f47ff1d72f7a6ef9c7ff54496c86d77c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 95,
"avg_line_length": 31.774193548387096,
"alnum_prop": 0.6081218274111675,
"repo_name": "kotfic/girder",
"id": "f1c7ec04143e26f4579be800e1bd0987baa0d868",
"size": "1779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/audit_logs/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "38260"
},
{
"name": "CSS",
"bytes": "54843"
},
{
"name": "Dockerfile",
"bytes": "2482"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "139763"
},
{
"name": "JavaScript",
"bytes": "1129529"
},
{
"name": "Mako",
"bytes": "7873"
},
{
"name": "Python",
"bytes": "2117090"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "9921"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
}
|
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import locate
# Globals
locate.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LocateTestCase(TestCase):
'''
Test cases for salt.modules.locate
'''
# 'version' function tests: 1
def test_version(self):
'''
Test if it returns the version of locate
'''
mock = MagicMock(return_value='mlocate 0.26')
with patch.dict(locate.__salt__, {'cmd.run': mock}):
self.assertListEqual(locate.version(), ['mlocate 0.26'])
# 'stats' function tests: 1
def test_stats(self):
'''
Test if it returns statistics about the locate database
'''
ret = {'files': '75,253',
'directories': '49,252',
'bytes in file names': '93,214',
'bytes used to store database': '29,165',
'database': '/var/lib/mlocate/mlocate.db'}
mock_ret = '''Database /var/lib/mlocate/mlocate.db:
49,252 directories
75,253 files
93,214 bytes in file names
29,165 bytes used to store database'''
with patch.dict(locate.__salt__,
{'cmd.run': MagicMock(return_value=mock_ret)}):
self.assertDictEqual(locate.stats(), ret)
# 'updatedb' function tests: 1
def test_updatedb(self):
'''
Test if it updates the locate database
'''
mock = MagicMock(return_value='')
with patch.dict(locate.__salt__, {'cmd.run': mock}):
self.assertListEqual(locate.updatedb(), [])
# 'locate' function tests: 1
def test_locate(self):
'''
Test if it performs a file lookup.
'''
mock = MagicMock(return_value='')
with patch.dict(locate.__salt__, {'cmd.run': mock}):
self.assertListEqual(locate.locate('wholename', database='myfile'), [])
if __name__ == '__main__':
from integration import run_tests
run_tests(LocateTestCase, needs_daemon=False)
|
{
"content_hash": "395ef458f4b22531354c3e4ffad3ac4a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 83,
"avg_line_length": 27.28735632183908,
"alnum_prop": 0.5855096882898062,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "ab1a698056c7ca50afda7f453ec8439fbf93ccbf",
"size": "2398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/unit/modules/locate_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
"""
Testing the cli
"""
##############################################################################
# Imports
##############################################################################
import contextlib
import sys # late import to avoid breaking capsys fixture
import os
import runpy
import pytest
# a simple selftest for pytest capsys fixture
def test_capsys(capsys):
print('smthg')
sys.stderr.write('smthgelse')
out, err = capsys.readouterr()
assert 'smthg' in out
assert 'smthgelse' in err
##############################################################################
# Test Class
##############################################################################
class TestCLI(object): # not a unittest.TestCase, since we rely on pytest capsys fixture
def setup(self):
self.cli_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'scripts', 'cli.py')
def test_help(self, capsys):
import sys # late import to avoid breaking capsys fixture
# redirecting stdout and stderr since we are testing a script running on command line
sys.argv = ['', '--help']
with pytest.raises(SystemExit) as excinfo:
runpy.run_path(self.cli_path, run_name='__main__')
assert excinfo.value.code == 0 # success
out, err = capsys.readouterr()
# Note other output can get mixed here (internal loggers propagated upwards to the top)
# We only want to assert a subset of the output
rel_script_path = os.path.relpath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'scripts', 'cli.py'))
assert "usage: " + rel_script_path + " [-h|--help] [--version]" in out
def test_version(self, capsys):
# redirecting stdout and stderr since we are testing a script running on command line
sys.argv = ['', '--version']
with pytest.raises(SystemExit) as excinfo:
runpy.run_path(self.cli_path, run_name='__main__')
assert excinfo.value.code == 0 # success
out, err = capsys.readouterr()
# Note other output can get mixed here (internal loggers propagated upwards to the top)
# We only want to assert a subset of the output
assert "ROS1 pip pytemplate version 0.1.1" in out
def test_noargs(self, capsys):
# redirecting stdout and stderr since we are testing a script running on command line
sys.argv = ['']
runpy.run_path(self.cli_path, run_name='__main__')
out, err = capsys.readouterr()
# Note other output can get mixed here (internal loggers propagated upwards to the top)
# We only want to assert a subset of the output
assert "STATUS: 200" in out
assert "args: {}" in out
assert "origin: " in out # origin will depend on machine
assert "url: http://httpbin.org/get" in out
def test_args(self, capsys):
# redirecting stdout and stderr since we are testing a script running on command line
sys.argv = ['', '--arg1', 'val1', '--arg2', 'val2']
runpy.run_path(self.cli_path, run_name='__main__')
out, err = capsys.readouterr()
# Note other output can get mixed here (internal loggers propagated upwards to the top)
# We only want to assert a subset of the output
assert "STATUS: 200" in out
assert "args: {arg1: val1, arg2: val2}" in out
assert "origin: " in out # origin will depend on machine
assert "url: http://httpbin.org/get?arg1=val1&arg2=val2" or "url: http://httpbin.org/get?arg2=val2&arg1=val1" in out
def test_bad_arg(self, capsys):
# redirecting stdout and stderr since we are testing a script running on command line
sys.argv = ['', '-badarg']
with pytest.raises(SystemExit) as excinfo:
runpy.run_path(self.cli_path, run_name='__main__')
assert excinfo.value.code == 127 # error
out, err = capsys.readouterr()
# Note other output can get mixed here (internal loggers propagated upwards to the top)
# We only want to assert a subset of the output
rel_script_path = os.path.relpath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'scripts', 'cli.py'))
assert "usage: " + rel_script_path + " [-h|--help] [--version]" in out
assert "Invalid Argument: -badarg" in err
# In case we run this directly, use pytest
if __name__ == '__main__':
pytest.main(['-x', __file__])
|
{
"content_hash": "1b962e1d58636d4695bee514832cf244",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 124,
"avg_line_length": 40.2212389380531,
"alnum_prop": 0.5938393839383939,
"repo_name": "pyros-dev/ros1_template",
"id": "98da2ec67be94308c999ca80910d32608a1af97d",
"size": "4611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ros1_pip_pytemplate/tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13446"
},
{
"name": "C++",
"bytes": "23761"
},
{
"name": "CMake",
"bytes": "17179"
},
{
"name": "Makefile",
"bytes": "13612"
},
{
"name": "Python",
"bytes": "76603"
}
],
"symlink_target": ""
}
|
from go import strings
from go import regexp
#from go import os
from go import io/ioutil
from go import net
from . import dns
from . import flag
from . import hexdump
PORT = flag.Int('port', 0, 'UDP port to listen on for DNS.')
UDPMAX = 512 # Should be enough bytes for DNS packets.
# [1] is Before the quote, [2] is In the quote, [3] is after.
FindQuote = regexp.MustCompile('^([^;"]*)["]([^"]*)["](.*)$').FindStringSubmatch
# [1] is Before the semicolon.
FindComment = regexp.MustCompile('^([^;]*)[;].*$').FindStringSubmatch
# [1] is first word, [2] is rest.
FindWord = regexp.MustCompile('^([-A-Za-z0-9_.:$@/*]+)\\s*(.*)').FindStringSubmatch
# [1] is rest.
FindWhiteSpace = regexp.MustCompile('^\\s+(.*)').FindStringSubmatch
# [1] ( [2]
FindUnclosedParen = regexp.MustCompile('^([^()]*)[(]([^()]*)$').FindStringSubmatch
# [1] ( [2] ) [3]
FindClosedParen = regexp.MustCompile('^([^()]*)[(]([^()]*)[)]([^()]*)$').FindStringSubmatch
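# Illustrative matches, not from the original file; FindStringSubmatch follows
# Go's convention of returning [full match, group 1, group 2, ...]:
#   FindQuote('TXT "hello world" ; note')
#     -> ['TXT "hello world" ; note', 'TXT ', 'hello world', ' ; note']
#   FindComment('example.com. IN A 1.2.3.4 ; glue')
#     -> ['example.com. IN A 1.2.3.4 ; glue', 'example.com. IN A 1.2.3.4 ']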
def DropTrailingDot(s):
if s and len(s) > 1 and s[-1] == '.':
return s[:-1]
return s
def ParseBody(d, body, origin):
ttl = dns.TTL
current = origin # Default if no domain in column 1.
lines = strings.Split(body, '\n')
i = 0
n = len(lines)
while i < n:
line = lines[i]
# Try removing quoted from near end of line.
quoted = []
while True:
fq = FindQuote(line) # Finds first quote.
if not fq:
break
_, front, inside, back = fq
line = front + ' ' + back
quoted.append(inside)
# Try removing semicolon comment.
fc = FindComment(line)
if fc:
_, line = fc
# Handle open but no close paren.
fup = FindUnclosedParen(line)
if fup:
while not FindClosedParen(line):
i += 1
must i < n, ('Missing close paren', line)
line += lines[i]
fc = FindComment(line)
if fc:
_, line = fc
fcp = FindClosedParen(line)
if fcp:
_, front, middle, _ = fcp
line = front + ' ' + middle
# Now we have an entire line.
orig = line
# Find first word, which may be missing.
word1 = current
fw1 = FindWord(line)
if fw1:
_, word1, line = fw1
if word1 == '@':
word1 = origin
if word1[0] != '$':
current = word1 # Set new default.
else:
            # If we did not remove a first word,
# we didn't remove any white space either,
# so do it now. word1 defaults to current.
fws = FindWhiteSpace(line)
if fws:
                _, line = fws
words = [word1]
while True:
fw = FindWord(line)
if not fw:
break
words.append(fw[1])
line = fw[2]
# Anything left over had better be white space.
fws = FindWhiteSpace(line)
if fws:
_, remnant = fws
line = remnant
if line:
raise 'Bad line had remaining stuff', orig, remnant
# Replace @ with origin.
words = [(origin if w == '@' else w) for w in words]
# Special commands, $ORIGIN and $TTL.
if words[0] == '$ORIGIN':
say words
origin = dns.Absolute(words[1], current)
i += 1
continue
if words[0] == '$TTL':
say words
ttl = int(words[1])
i += 1
continue
##### say quoted, words, orig
rr = dns.MakeRR(words, quoted, current, ttl)
if rr:
vec = d.get(rr.name)
if vec is None:
vec = []
d[rr.name] = vec
vec.append(rr)
i += 1
return rr
def Serve(d):
addy = go_new(net.UDPAddr)
addy.Port = PORT.X
say "Listening..."
conn = net.ListenUDP("udp4", addy)
conn.SetReadBuffer(4096)
while True:
buf = mkbyt(UDPMAX)
say "ReadingFromUDP..."
n, addr = conn.ReadFromUDP(buf)
say n, addr, buf
go Answer(d, buf, n, addr, conn)
def Answer(d, buf, n, addr, conn):
try:
hexdump.HexDump(buf[:n], 'Packet IN')
q = dns.ReadQuestion(buf, n)
vec = d.get(q.name)
if vec:
buf2 = mkbyt(UDPMAX)
w = dns.Writer(buf2)
w.WriteHead1(q.serial, 0)
na = 0
for rr in vec:
if q.typ == 255 or rr.typ == q.typ:
na += 1
#if na >= 2:
# break
w.WriteHead2(1, na, 0, 0)
w.WriteQuestion(q)
j = 0
for rr in vec:
if q.typ == 255 or rr.typ == q.typ:
rr.WriteRR(w)
j += 1
#if j >= 2:
# break
packet = buf2[:w.i]
hexdump.HexDump(buf[:n], 'Packet IN')
hexdump.HexDump(packet, 'Packet OUT')
conn.WriteToUDP(packet, addr)
pass
except as ex:
say 'CAUGHT', ex
def Slurp(d, filename):
say filename
origin = strings.Split(filename, '/')[-1]
say origin
body = ioutil.ReadFile(filename)
ParseBody(d, body, origin)
def main(argv):
filenames = flag.Munch(argv)
d = {}
if not filenames:
raise 'Arguments required for zonefile filenames'
for filename in filenames:
Slurp(d, filename)
Serve(d)
|
{
"content_hash": "c427a922a4e956a1f09eb1a85d77b8a9",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 91,
"avg_line_length": 23.764705882352942,
"alnum_prop": 0.568069306930693,
"repo_name": "strickyak/aphid",
"id": "9162450a207663651a25fc02a1d1ecc26a05c875",
"size": "5324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old1/zoner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "31"
},
{
"name": "CSS",
"bytes": "74"
},
{
"name": "Common Lisp",
"bytes": "172"
},
{
"name": "Go",
"bytes": "8414"
},
{
"name": "HTML",
"bytes": "5190"
},
{
"name": "JavaScript",
"bytes": "99"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Python",
"bytes": "337935"
},
{
"name": "Ring",
"bytes": "15058"
},
{
"name": "Roff",
"bytes": "194"
},
{
"name": "Shell",
"bytes": "8238"
},
{
"name": "Tcl",
"bytes": "27464"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(0, "../../python")
import os.path
import mxnet as mx
from config_util import get_checkpoint_path, parse_contexts
from stt_metric import STTMetric
#tensorboard setting
from tensorboard import SummaryWriter
import json
from stt_bucketing_module import STTBucketingModule
def get_initializer(args):
init_type = getattr(mx.initializer, args.config.get('train', 'initializer'))
init_scale = args.config.getfloat('train', 'init_scale')
if init_type is mx.initializer.Xavier:
return mx.initializer.Xavier(magnitude=init_scale, factor_type=args.config.get('train', 'factor_type'))
return init_type(init_scale)
class SimpleLRScheduler(mx.lr_scheduler.LRScheduler):
"""A simple lr schedule that simply return `dynamic_lr`. We will set `dynamic_lr`
dynamically based on performance on the validation set.
"""
def __init__(self, learning_rate=0.001):
super(SimpleLRScheduler, self).__init__()
self.learning_rate = learning_rate
def __call__(self, num_update):
return self.learning_rate
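# Usage sketch (hypothetical values, not from this training script): the
# scheduler just echoes whatever `learning_rate` currently holds, so the
# training loop can anneal it in place, as done at the end of each epoch below.
#
#   scheduler = SimpleLRScheduler(learning_rate=0.001)
#   scheduler(num_update=10)               # -> 0.001
#   scheduler.learning_rate = 0.001 / 1.1  # anneal
#   scheduler(num_update=11)               # -> ~0.000909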
def do_training(args, module, data_train, data_val, begin_epoch=0):
from distutils.dir_util import mkpath
from log_util import LogUtil
log = LogUtil().getlogger()
mkpath(os.path.dirname(get_checkpoint_path(args)))
#seq_len = args.config.get('arch', 'max_t_count')
batch_size = args.config.getint('common', 'batch_size')
save_checkpoint_every_n_epoch = args.config.getint('common', 'save_checkpoint_every_n_epoch')
save_checkpoint_every_n_batch = args.config.getint('common', 'save_checkpoint_every_n_batch')
enable_logging_train_metric = args.config.getboolean('train', 'enable_logging_train_metric')
enable_logging_validation_metric = args.config.getboolean('train', 'enable_logging_validation_metric')
contexts = parse_contexts(args)
num_gpu = len(contexts)
eval_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu, is_logging=enable_logging_validation_metric,is_epoch_end=True)
# tensorboard setting
loss_metric = STTMetric(batch_size=batch_size, num_gpu=num_gpu, is_logging=enable_logging_train_metric,is_epoch_end=False)
optimizer = args.config.get('optimizer', 'optimizer')
learning_rate = args.config.getfloat('train', 'learning_rate')
learning_rate_annealing = args.config.getfloat('train', 'learning_rate_annealing')
mode = args.config.get('common', 'mode')
num_epoch = args.config.getint('train', 'num_epoch')
clip_gradient = args.config.getfloat('optimizer', 'clip_gradient')
weight_decay = args.config.getfloat('optimizer', 'weight_decay')
save_optimizer_states = args.config.getboolean('train', 'save_optimizer_states')
show_every = args.config.getint('train', 'show_every')
optimizer_params_dictionary = json.loads(args.config.get('optimizer', 'optimizer_params_dictionary'))
kvstore_option = args.config.get('common', 'kvstore_option')
n_epoch=begin_epoch
is_bucketing = args.config.getboolean('arch', 'is_bucketing')
if clip_gradient == 0:
clip_gradient = None
if is_bucketing and mode == 'load':
model_file = args.config.get('common', 'model_file')
model_name = os.path.splitext(model_file)[0]
model_num_epoch = int(model_name[-4:])
model_path = 'checkpoints/' + str(model_name[:-5])
symbol, data_names, label_names = module(1600)
model = STTBucketingModule(
sym_gen=module,
default_bucket_key=data_train.default_bucket_key,
context=contexts)
data_train.reset()
model.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
_, arg_params, aux_params = mx.model.load_checkpoint(model_path, model_num_epoch)
model.set_params(arg_params, aux_params)
module = model
else:
module.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label,
for_training=True)
if begin_epoch == 0 and mode == 'train':
module.init_params(initializer=get_initializer(args))
lr_scheduler = SimpleLRScheduler(learning_rate=learning_rate)
def reset_optimizer(force_init=False):
optimizer_params = {'lr_scheduler': lr_scheduler,
'clip_gradient': clip_gradient,
'wd': weight_decay}
optimizer_params.update(optimizer_params_dictionary)
module.init_optimizer(kvstore=kvstore_option,
optimizer=optimizer,
optimizer_params=optimizer_params,
force_init=force_init)
if mode == "train":
reset_optimizer(force_init=True)
else:
reset_optimizer(force_init=False)
data_train.reset()
data_train.is_first_epoch = True
#tensorboard setting
tblog_dir = args.config.get('common', 'tensorboard_log_dir')
summary_writer = SummaryWriter(tblog_dir)
while True:
if n_epoch >= num_epoch:
break
loss_metric.reset()
log.info('---------train---------')
for nbatch, data_batch in enumerate(data_train):
module.forward_backward(data_batch)
module.update()
# tensorboard setting
if (nbatch + 1) % show_every == 0:
module.update_metric(loss_metric, data_batch.label)
#summary_writer.add_scalar('loss batch', loss_metric.get_batch_loss(), nbatch)
if (nbatch+1) % save_checkpoint_every_n_batch == 0:
log.info('Epoch[%d] Batch[%d] SAVE CHECKPOINT', n_epoch, nbatch)
module.save_checkpoint(prefix=get_checkpoint_path(args)+"n_epoch"+str(n_epoch)+"n_batch", epoch=(int((nbatch+1)/save_checkpoint_every_n_batch)-1), save_optimizer_states=save_optimizer_states)
# commented for Libri_sample data set to see only train cer
log.info('---------validation---------')
data_val.reset()
eval_metric.reset()
for nbatch, data_batch in enumerate(data_val):
            # with is_train=False, batch_norm leads to a high CER, so keep is_train=True here
module.forward(data_batch, is_train=True)
module.update_metric(eval_metric, data_batch.label)
# tensorboard setting
val_cer, val_n_label, val_l_dist, _ = eval_metric.get_name_value()
log.info("Epoch[%d] val cer=%f (%d / %d)", n_epoch, val_cer, int(val_n_label - val_l_dist), val_n_label)
curr_acc = val_cer
summary_writer.add_scalar('CER validation', val_cer, n_epoch)
assert curr_acc is not None, 'cannot find Acc_exclude_padding in eval metric'
data_train.reset()
data_train.is_first_epoch = False
# tensorboard setting
train_cer, train_n_label, train_l_dist, train_ctc_loss = loss_metric.get_name_value()
summary_writer.add_scalar('loss epoch', train_ctc_loss, n_epoch)
summary_writer.add_scalar('CER train', train_cer, n_epoch)
# save checkpoints
if n_epoch % save_checkpoint_every_n_epoch == 0:
log.info('Epoch[%d] SAVE CHECKPOINT', n_epoch)
module.save_checkpoint(prefix=get_checkpoint_path(args), epoch=n_epoch, save_optimizer_states=save_optimizer_states)
n_epoch += 1
lr_scheduler.learning_rate=learning_rate/learning_rate_annealing
log.info('FINISH')
|
{
"content_hash": "34ae1689904a8264964a644d9558fd4b",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 207,
"avg_line_length": 43.917647058823526,
"alnum_prop": 0.6467988213233324,
"repo_name": "lxn2/mxnet",
"id": "f3a7555529e3d2c223b0e4b9936ccc7077d7139d",
"size": "7466",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "example/speech_recognition/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "89393"
},
{
"name": "C++",
"bytes": "3189126"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "566898"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40032"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "615878"
},
{
"name": "Perl6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3084885"
},
{
"name": "R",
"bytes": "280777"
},
{
"name": "Scala",
"bytes": "855146"
},
{
"name": "Shell",
"bytes": "109919"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages, Extension
from pkgutil import get_importer
from collections import defaultdict
from functools import wraps
from distutils import sysconfig
import re
from fnmatch import fnmatch
from os.path import join
import os
def find(directory, patterns):
result = []
for node, _, filenames in os.walk(directory):
for filename in filenames:
for pattern in patterns:
if fnmatch(filename, pattern):
result.append(join(node, filename))
return result
def lazy(function):
@wraps(function)
def wrapped(*args, **kwargs):
class LazyProxy(object):
def __init__(self, function, args, kwargs):
self._function = function
self._args = args
self._kwargs = kwargs
self._result = None
def __len__(self):
return self.__len__()
def __iter__(self):
return self.__iter__()
def __getattribute__(self, name):
if name in ['_function', '_args', '_kwargs', '_result']:
return super(LazyProxy, self).__getattribute__(name)
if self._result is None:
self._result = self._function(*self._args, **self._kwargs)
return object.__getattribute__(self._result, name)
def __setattr__(self, name, value):
if name in ['_function', '_args', '_kwargs', '_result']:
super(LazyProxy, self).__setattr__(name, value)
return
if self._result is None:
self._result = self._function(*self._args, **self._kwargs)
setattr(self._result, name, value)
return LazyProxy(function, args, kwargs)
return wrapped
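# Rough illustration of what @lazy buys us (hypothetical function, not part of
# this setup script): the decorated call returns immediately, and the real work
# (here, probing pkg-config in make_config) only happens when the proxy is
# first used by setuptools.
#
#   @lazy
#   def expensive():
#       print('computed')
#       return [1, 2, 3]
#
#   proxy = expensive()  # nothing printed yet
#   len(proxy)           # prints 'computed', returns 3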
# Navigate, import, and retrieve the metadata of the project.
meta = get_importer('src/hummus').find_module('meta').load_module('meta')
def make_config():
from pkgconfig import parse
# Process the `pkg-config` utility and discover include and library
# directories.
config = defaultdict(set)
for lib in ['zlib', 'libtiff-4', 'freetype2']:
for key, value in parse(lib).items():
config[key].update(value)
# Add libjpeg (no .pc file).
config['libraries'].add('jpeg')
# List-ify config for setuptools.
for key in config:
config[key] = list(config[key])
# Add hummus.
config['include_dirs'].insert(0, 'lib/hummus/PDFWriter')
config['include_dirs'].insert(0, 'lib/python')
# Add local library.
config['include_dirs'].insert(0, 'src')
# Return built config.
return config
@lazy
def make_extension(name, sources=None, cython=True):
# Resolve extension location from name.
location = join('src', *name.split('.'))
location += '.pyx' if cython else '.cpp'
# NOTE: Performing black magic hacks to remove --as-needed from the linker
# flags if present.
sysconfig.get_config_vars()
lds = sysconfig._config_vars['LDSHARED']
sysconfig._config_vars['LDSHARED'] = re.sub(r',?--as-needed,??', '', lds)
config = make_config()
config['libraries'].insert(0, 'hummus')
# Create and return the extension.
return Extension(
name=name,
sources=sources + [location] if sources else [location],
language='c++',
**config)
@lazy
def make_library(name, directory):
patterns = ['*.cxx', '*.cpp']
return [name, dict(sources=find(directory, patterns), **make_config())]
setup(
name='hummus',
version=meta.version,
description=meta.description,
author='Concordus Applications',
author_email='support@concordusapps.com',
url='https://github.com/concordusapps/python-hummus',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
],
package_dir={'hummus': 'src/hummus'},
packages=find_packages('src'),
setup_requires=[
'setuptools_cython',
'pkgconfig'
],
install_requires=[
'six',
'wand',
],
extras_require={
'test': ['pytest'],
},
libraries=[
make_library('hummus', 'lib/hummus/PDFWriter'),
],
ext_modules=[
make_extension('hummus.reader'),
make_extension('hummus.writer'),
make_extension('hummus.rectangle'),
make_extension('hummus.page'),
make_extension('hummus.context'),
make_extension('hummus.text'),
make_extension('hummus.image'),
make_extension(
name='hummus.interface',
sources=find('lib/python/interface', ['*.cxx'])),
]
)
|
{
"content_hash": "1cfc2fcbf045797418209472ebadd762",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 29.06508875739645,
"alnum_prop": 0.5846905537459284,
"repo_name": "concordusapps/python-hummus",
"id": "dd5c7421f00eac47354be40f6011b9afcba03077",
"size": "4958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5341"
},
{
"name": "Python",
"bytes": "41539"
}
],
"symlink_target": ""
}
|
import base64
import copy
import sys
import time
from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models import test_run_results
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
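# For example (hypothetical entry, not one of the tests registered below):
#
#   tests = TestList()
#   tests.add('failures/expected/foo.html', timeout=True)
#
# builds a TestInstance with all of the defaults from __init__ and only
# overrides the 'timeout' flag via test.__dict__.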
class TestInstance(object):
def __init__(self, name):
self.name = name
self.base = name[(name.rfind("/") + 1):name.rfind(".")]
self.crash = False
self.web_process_crash = False
self.exception = False
self.keyboard = False
self.error = ''
self.timeout = False
self.is_reftest = False
self.device_failure = False
self.leak = False
# The values of each field are treated as raw byte strings. They
# will be converted to unicode strings where appropriate using
# FileSystem.read_text_file().
self.actual_text = self.base + '-txt'
self.actual_checksum = self.base + '-checksum'
# We add the '\x8a' for the image file to prevent the value from
# being treated as UTF-8 (the character is invalid)
self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
self.expected_text = self.actual_text
self.expected_image = self.actual_image
self.actual_audio = None
self.expected_audio = None
# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
def __init__(self):
self.tests = {}
def add(self, name, **kwargs):
test = TestInstance(name)
for key, value in kwargs.items():
test.__dict__[key] = value
self.tests[name] = test
def add_reftest(self, name, reference_name, same_image, crash=False):
self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
if same_image:
self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
else:
self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
def keys(self):
return self.tests.keys()
def __contains__(self, item):
return item in self.tests
def __getitem__(self, item):
return self.tests[item]
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
TOTAL_TESTS = 114
TOTAL_SKIPS = 29
UNEXPECTED_PASSES = 1
UNEXPECTED_FAILURES = 26
def unit_test_list():
tests = TestList()
tests.add('failures/expected/crash.html', crash=True)
tests.add('failures/expected/exception.html', exception=True)
tests.add('failures/expected/device_failure.html', device_failure=True)
tests.add('failures/expected/timeout.html', timeout=True)
tests.add('failures/expected/leak.html', leak=True)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
tests.add('failures/expected/image.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/expected/image_checksum.html',
actual_checksum='image_checksum_fail-checksum',
actual_image='image_checksum_fail-png')
tests.add('failures/expected/audio.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/missing_check.html',
expected_image='missing_check-png')
tests.add('failures/expected/missing_image.html', expected_image=None)
tests.add('failures/expected/missing_audio.html', expected_audio=None,
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/newlines_leading.html',
expected_text="\nfoo\n", actual_text="foo\n")
tests.add('failures/expected/newlines_trailing.html',
expected_text="foo\n\n", actual_text="foo\n")
tests.add('failures/expected/newlines_with_excess_CR.html',
expected_text="foo\r\r\r\n", actual_text="foo\n")
tests.add('failures/expected/text.html', actual_text='text_fail-png')
tests.add('failures/expected/crash_then_text.html')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/text_then_crash.html')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('failures/unexpected/leak.html', leak=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
# This adds a different virtual reference to ensure that that also works.
tests.add('virtual/virtual_passes/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/virtual_passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
# Add the test_expectations file.
filesystem.maybe_make_directory('/mock-checkout/LayoutTests')
if not filesystem.exists('/mock-checkout/LayoutTests/TestExpectations'):
filesystem.write_text_file('/mock-checkout/LayoutTests/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/crash_then_text.html [ Failure ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/expected/device_failure.html [ WontFix ]
Bug(test) failures/expected/leak.html [ Leak ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
Bug(test) passes/text.html [ Pass ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html
== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")
# FIXME: This test was only being ignored because of missing a leading '/'.
# Fixing the typo causes several tests to assert, so disabling the test entirely.
    # Add in a file that should be ignored by port.find_test_files().
#files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
def add_file(test, suffix, contents):
dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
base = test.base
filesystem.maybe_make_directory(dirname)
filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
# Add each test and the expected output, if any.
test_list = unit_test_list()
for test in test_list.tests.values():
add_file(test, test.name[test.name.rfind('.'):], '')
if test.is_reftest:
continue
if test.actual_audio:
add_file(test, '-expected.wav', test.expected_audio)
continue
add_file(test, '-expected.txt', test.expected_text)
add_file(test, '-expected.png', test.expected_image)
filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'virtual_passes', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
# Clear the list of written files so that we can watch what happens during testing.
filesystem.clear_written_files()
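# Illustrative usage sketch, not part of the original module: unit tests
# typically pair this helper with a mocked host/filesystem, e.g.
#
#   host = MockHost()                       # hypothetical mock host object
#   add_unit_tests_to_mock_filesystem(host.filesystem)
#   port = TestPort(host)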
class TestPort(Port):
    """Test implementation of the Port interface."""
    port_name = 'test'
    default_port_name = 'test-mac-leopard'
ALL_BASELINE_VARIANTS = (
'test-linux-x86_64',
'test-mac-snowleopard', 'test-mac-leopard',
'test-win-win7', 'test-win-xp',
)
FALLBACK_PATHS = {
'xp': ['test-win-win7', 'test-win-xp'],
'win7': ['test-win-win7'],
'leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
'snowleopard': ['test-mac-snowleopard'],
'lucid': ['test-linux-x86_64', 'test-win-win7'],
}
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
# FIXME: crbug.com/279494. This needs to be in the "real layout tests
# dir" in a mock filesystem, rather than outside of the checkout, so
# that tests that want to write to a TestExpectations file can share
# this between "test" ports and "real" ports. This is the result of
# rebaseline_unittest.py having tests that refer to "real" port names
# and real builders instead of fake builders that point back to the
# test ports. rebaseline_unittest.py needs to not mix both "real" ports
# and "test" ports
self._generic_expectations_path = '/mock-checkout/LayoutTests/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
if self._name.startswith('test-win'):
self._operating_system = 'win'
elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
'test-win-xp': 'xp',
'test-win-win7': 'win7',
'test-mac-leopard': 'leopard',
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
self._version = version_map[self._name]
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base."""
# FIXME: We override this just to keep the perf tests happy.
return [('blink', self.layout_tests_dir())]
def buildbot_archives_baselines(self):
return self._name != 'test-win-xp'
def default_pixel_tests(self):
return True
def _path_to_driver(self):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
return 'MOCK _path_to_driver'
def default_child_processes(self):
return 1
def check_build(self, needs_http, printer):
return test_run_results.OK_EXIT_STATUS
def check_sys_deps(self, needs_http):
return test_run_results.OK_EXIT_STATUS
def default_configuration(self):
return 'Release'
def diff_image(self, expected_contents, actual_contents):
diffed = actual_contents != expected_contents
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents or not expected_contents:
return (True, None)
if diffed:
return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
return (None, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
def perf_tests_dir(self):
return PERF_TEST_DIR
def webkit_base(self):
return '/test.checkout'
def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped/failures/expected'])
def name(self):
return self._name
def operating_system(self):
return self._operating_system
def _path_to_wdiff(self):
return None
def default_results_directory(self):
return '/tmp/layout-test-results'
def setup_test_run(self):
pass
def _driver_class(self):
return TestDriver
def start_http_server(self, additional_dirs, number_of_drivers):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def path_to_apache(self):
return "/usr/sbin/httpd"
def path_to_apache_config_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
def path_to_generic_test_expectations_file(self):
return self._generic_expectations_path
def _port_specific_expectations_files(self):
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]
def all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
test_configurations.append(TestConfiguration(
version=version,
architecture=architecture,
build_type=build_type))
return test_configurations
def _all_systems(self):
return (('leopard', 'x86'),
('snowleopard', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'))
def _all_build_types(self):
return ('debug', 'release')
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def virtual_test_suites(self):
return [
VirtualTestSuite(prefix='virtual_passes', base='passes', args=['--virtual-arg']),
VirtualTestSuite(prefix='skipped', base='failures/expected', args=['--virtual-arg2']),
VirtualTestSuite(prefix='references_use_default_args', base='passes/reftest.html',
args=['--virtual-arg'], references_use_default_args=True),
]
class TestDriver(Driver):
"""Test/Dummy implementation of the driver interface."""
next_pid = 1
def __init__(self, *args, **kwargs):
super(TestDriver, self).__init__(*args, **kwargs)
self.started = False
self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_driver_flag', []) + per_test_args
def run_test(self, driver_input, stop_when_done):
if not self.started:
self.started = True
self.pid = TestDriver.next_pid
TestDriver.next_pid += 1
start_time = time.time()
test_name = driver_input.test_name
test_args = driver_input.args or []
test = self._port._tests[test_name]
if test.keyboard:
raise KeyboardInterrupt
if test.exception:
raise ValueError('exception from ' + test_name)
if test.device_failure:
raise DeviceFailure('device failure in ' + test_name)
audio = None
actual_text = test.actual_text
crash = test.crash
web_process_crash = test.web_process_crash
if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
self._port._flakes.add(test_name)
actual_text = 'flaky text failure'
if 'crash_then_text.html' in test_name:
if test_name in self._port._flakes:
actual_text = 'text failure'
else:
self._port._flakes.add(test_name)
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
if 'text_then_crash.html' in test_name:
if test_name in self._port._flakes:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
else:
self._port._flakes.add(test_name)
actual_text = 'text failure'
if actual_text and test_args and test_name == 'passes/args.html':
actual_text = actual_text + ' ' + ' '.join(test_args)
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
if crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
elif web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
if stop_when_done:
self.stop()
if test.actual_checksum == driver_input.image_hash:
image = None
else:
image = test.actual_image
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
leak=test.leak)
def stop(self):
self.started = False
|
{
"content_hash": "db0e95fcfdb18947d136a6d67405b091",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 150,
"avg_line_length": 43.88709677419355,
"alnum_prop": 0.664571848585079,
"repo_name": "crosswalk-project/blink-crosswalk",
"id": "c4399b7232fc0c2119a326082557decafab2a072",
"size": "28732",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Tools/Scripts/webkitpy/layout_tests/port/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
import time
import traceback
import warnings
import lib.pool
SQL_QUERY = 'SELECT {columns} FROM {table} WHERE project_id = {project_id}'
SQL_INSERT = 'INSERT INTO {table}({columns}) VALUES ({placeholders})'
SQL_UPDATE = '''
UPDATE {table}
SET {placeholders}
WHERE project_id = {project_id}
'''
class Run(object):
def __init__(self, repo_root, attributes, database, threshold, processes):
self.repo_root = repo_root
self.attributes = attributes
self.database = database
self.threshold = threshold
self.processes = processes
def run(self, samples, table):
try:
sys.stdout.write('{0}\n'.format('#' * 25))
sys.stdout.write('{0}\n'.format(str.center('Run', 25)))
sys.stdout.write('{0}\n'.format('#' * 25))
self.attributes.global_init(samples)
with lib.pool.NonDaemonicProcessPool(self.processes) as pool:
pool.starmap(
self._process,
[(project_id, table) for project_id in samples],
chunksize=1
)
sys.stdout.write('{0}\n'.format('#' * 25))
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
    def _process(self, project_id, table):
        # rresults is referenced in the finally block below, so initialize it
        # in case attributes.run() raises before the assignment completes.
        rresults = None
        try:
            rresults = self.attributes.run(project_id, self.repo_root)
except:
sys.stderr.write('Exception\n\n')
sys.stderr.write(' Project ID {0}\n'.format(project_id))
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
finally:
if rresults is not None:
self._save(project_id, rresults, table)
# HACK: Waiting for mysqld to reclaim its connection
time.sleep(0.5)
def _save(self, project_id, rresults, table):
# Merge raw results from current run with existing ones (if any)
is_existing = False
_rresults = self._get(project_id, table)
if _rresults:
is_existing = True
# Update the dictionary containing attribute values retrieved from
# the database iff at least one of the values is not NULL.
# Typically, a project that was not active at the time of the
# reaper run will have all its attribute values set to NULL.
# However, when re-computing the score, the default values of the
# attributes may overwrite the NULL values in the database.
updatable = False
if len([i for i in _rresults.values() if i is not None]) > 0:
updatable = True
_rresults.update(rresults)
score = self.attributes.score(_rresults)
self._print_outcome(project_id, score)
if self.attributes.is_persistence_enabled:
if is_existing is True and updatable is False:
return
columns = ('project_id', 'score')
values = (project_id, score)
for key in rresults:
if self.attributes.get(key).persist:
if rresults[key] is not None:
columns += (key,)
values += (rresults[key],)
if is_existing:
# Update
query = SQL_UPDATE.format(
project_id=project_id, table=table,
placeholders=('=%s,'.join(columns) + '=%s')
)
else:
# Insert
query = SQL_INSERT.format(
columns=','.join(columns), table=table,
placeholders=','.join(['%s' for i in range(len(columns))])
)
try:
self.database.connect()
self.database.post(query, values)
finally:
self.database.disconnect()
else:
if 'DEBUG' in os.environ:
for (attribute, result) in rresults.items():
print('[{0:10d}] {1:25s} {2}'.format(
project_id, attribute, result
))
def _get(self, project_id, table):
rresults = dict()
try:
columns = [
attribute.name for attribute in self.attributes.attributes
]
self.database.connect()
output = self.database.get(
SQL_QUERY.format(
columns=','.join(columns), table=table,
project_id=project_id
)
)
if output is not None:
for (index, column) in enumerate(columns):
rresults[column] = output[index]
finally:
self.database.disconnect()
return rresults
def _print_outcome(self, project_id, score):
# Generate a green checkmark or red x using terminal escapes
cresult = '\033[92m✓\033[0m'
if score < self.threshold:
cresult = '\033[91m✘\033[0m'
sys.stdout.write(
' [{0:>10d}] {1} {2}\n'.format(project_id, score, cresult)
)
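# Illustrative usage sketch, not part of the original module; the attribute
# manager, database wrapper, sample ids and table name below are placeholders
# supplied elsewhere in reaper.
#
#   run = Run(repo_root='/data/repositories', attributes=attributes,
#             database=database, threshold=50, processes=4)
#   run.run(samples=[1234, 5678], table='reaper_results')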
|
{
"content_hash": "9de0e2899851a31272e8ac3c62c10683",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 35.965753424657535,
"alnum_prop": 0.5299942868025138,
"repo_name": "RepoReapers/reaper",
"id": "e648a85160c183e9eea1900b261893b7c38dd06e",
"size": "5255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "133"
},
{
"name": "JavaScript",
"bytes": "392"
},
{
"name": "Objective-C",
"bytes": "59"
},
{
"name": "Python",
"bytes": "145851"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
"""Check the DEPS file for correctness."""
import os
import re
import subprocess
import sys
import utils
INFRA_BOTS_DIR = os.path.dirname(os.path.realpath(__file__))
SKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))
def main():
"""Load the DEPS file and verify that all entries are valid."""
# Find gclient.py and run that instead of simply "gclient", which calls into
# update_depot_tools.
gclient = subprocess.check_output([utils.WHICH, utils.GCLIENT])
gclient_py = os.path.join(os.path.dirname(gclient), 'gclient.py')
python = sys.executable or 'python'
# Obtain the DEPS mapping.
output = subprocess.check_output(
[python, gclient_py, 'revinfo'], cwd=SKIA_DIR)
# Check each entry.
errs = []
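  # Each `gclient revinfo` line this loop expects looks roughly like
  # (hypothetical values):
  #   third_party/foo: https://chromium.googlesource.com/foo@<40-hex-char sha1>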
for e in output.rstrip().splitlines():
split = e.split(': ')
if len(split) != 2:
errs.append(
'Failed to parse `gclient revinfo` output; invalid format: %s' % e)
if split[0] == 'skia':
continue
split = split[1].split('@')
if len(split) != 2:
errs.append(
'Failed to parse `gclient revinfo` output; invalid format: %s' % e)
repo = split[0]
rev = split[1]
    if 'googlesource.com' not in repo:
errs.append(
'DEPS must be hosted on googlesource.com; %s is not allowed.' % repo)
if not re.match(r'^[a-z0-9]{40}$', rev):
errs.append('%s: "%s" does not look like a commit hash.' % (repo, rev))
if errs:
print >> sys.stderr, 'Found problems in DEPS:'
for err in errs:
print >> sys.stderr, err
sys.exit(1)
if __name__ == '__main__':
main()
|
{
"content_hash": "928716827e6ddc865c76dcf378bb1fff",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 28.75,
"alnum_prop": 0.6260869565217392,
"repo_name": "endlessm/chromium-browser",
"id": "6f3106643d1e9c8c289a6cf75bbc2ac061ffa8b2",
"size": "1768",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/skia/infra/bots/check_deps.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import abc
import six
import struct
from . import packet_base
from . import icmpv6
from . import tcp
from . import udp
from . import sctp
from . import in_proto as inet
from ryu.lib import addrconv
from ryu.lib import stringify
IPV6_ADDRESS_PACK_STR = '!16s'
IPV6_ADDRESS_LEN = struct.calcsize(IPV6_ADDRESS_PACK_STR)
IPV6_PSEUDO_HEADER_PACK_STR = '!16s16s3xB'
class ipv6(packet_base.PacketBase):
"""IPv6 (RFC 2460) header encoder/decoder class.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
IPv6 addresses are represented as a string like 'ff02::1'.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|p{30em}|l|
============== ======================================== ==================
Attribute Description Example
============== ======================================== ==================
version Version
traffic_class Traffic Class
flow_label When decoding, Flow Label.
When encoding, the most significant 8
bits of Flow Label.
payload_length Payload Length
nxt Next Header
hop_limit Hop Limit
src Source Address 'ff02::1'
dst Destination Address '::'
ext_hdrs Extension Headers
============== ======================================== ==================
"""
_PACK_STR = '!IHBB16s16s'
_MIN_LEN = struct.calcsize(_PACK_STR)
_IPV6_EXT_HEADER_TYPE = {}
_TYPE = {
'ascii': [
'src', 'dst'
]
}
@staticmethod
def register_header_type(type_):
def _register_header_type(cls):
ipv6._IPV6_EXT_HEADER_TYPE[type_] = cls
return cls
return _register_header_type
def __init__(self, version=6, traffic_class=0, flow_label=0,
payload_length=0, nxt=inet.IPPROTO_TCP, hop_limit=255,
src='10::10', dst='20::20', ext_hdrs=None):
super(ipv6, self).__init__()
self.version = version
self.traffic_class = traffic_class
self.flow_label = flow_label
self.payload_length = payload_length
self.nxt = nxt
self.hop_limit = hop_limit
self.src = src
self.dst = dst
ext_hdrs = ext_hdrs or []
assert isinstance(ext_hdrs, list)
for ext_hdr in ext_hdrs:
assert isinstance(ext_hdr, header)
self.ext_hdrs = ext_hdrs
@classmethod
def parser(cls, buf):
(v_tc_flow, payload_length, nxt, hlim, src, dst) = struct.unpack_from(
cls._PACK_STR, buf)
version = v_tc_flow >> 28
traffic_class = (v_tc_flow >> 20) & 0xff
flow_label = v_tc_flow & 0xfffff
hop_limit = hlim
offset = cls._MIN_LEN
last = nxt
ext_hdrs = []
while True:
cls_ = cls._IPV6_EXT_HEADER_TYPE.get(last)
if not cls_:
break
hdr = cls_.parser(buf[offset:])
ext_hdrs.append(hdr)
offset += len(hdr)
last = hdr.nxt
msg = cls(version, traffic_class, flow_label, payload_length,
nxt, hop_limit, addrconv.ipv6.bin_to_text(src),
addrconv.ipv6.bin_to_text(dst), ext_hdrs)
return (msg, ipv6.get_packet_type(last),
buf[offset:offset + payload_length])
def serialize(self, payload, prev):
hdr = bytearray(40)
v_tc_flow = (self.version << 28 | self.traffic_class << 20 |
self.flow_label)
struct.pack_into(ipv6._PACK_STR, hdr, 0, v_tc_flow,
self.payload_length, self.nxt, self.hop_limit,
addrconv.ipv6.text_to_bin(self.src),
addrconv.ipv6.text_to_bin(self.dst))
if self.ext_hdrs:
for ext_hdr in self.ext_hdrs:
hdr.extend(ext_hdr.serialize())
if 0 == self.payload_length:
payload_length = len(payload)
for ext_hdr in self.ext_hdrs:
payload_length += len(ext_hdr)
self.payload_length = payload_length
struct.pack_into('!H', hdr, 4, self.payload_length)
return hdr
def __len__(self):
ext_hdrs_len = 0
for ext_hdr in self.ext_hdrs:
ext_hdrs_len += len(ext_hdr)
return self._MIN_LEN + ext_hdrs_len
ipv6.register_packet_type(icmpv6.icmpv6, inet.IPPROTO_ICMPV6)
ipv6.register_packet_type(tcp.tcp, inet.IPPROTO_TCP)
ipv6.register_packet_type(udp.udp, inet.IPPROTO_UDP)
ipv6.register_packet_type(sctp.sctp, inet.IPPROTO_SCTP)
@six.add_metaclass(abc.ABCMeta)
class header(stringify.StringifyMixin):
"""extension header abstract class."""
def __init__(self, nxt):
self.nxt = nxt
@classmethod
@abc.abstractmethod
def parser(cls, buf):
pass
@abc.abstractmethod
def serialize(self):
pass
@abc.abstractmethod
def __len__(self):
pass
class opt_header(header):
"""an abstract class for Hop-by-Hop Options header and destination
header."""
_PACK_STR = '!BB'
_MIN_LEN = struct.calcsize(_PACK_STR)
_FIX_SIZE = 8
_class_prefixes = ['option']
@abc.abstractmethod
def __init__(self, nxt, size, data):
super(opt_header, self).__init__(nxt)
assert not (size % 8)
self.size = size
self.data = data
@classmethod
def parser(cls, buf):
(nxt, len_) = struct.unpack_from(cls._PACK_STR, buf)
data_len = cls._FIX_SIZE + int(len_)
data = []
size = cls._MIN_LEN
while size < data_len:
(type_, ) = struct.unpack_from('!B', buf[size:])
if type_ == 0:
opt = option(type_, -1, None)
size += 1
else:
opt = option.parser(buf[size:])
size += len(opt)
data.append(opt)
return cls(nxt, len_, data)
def serialize(self):
buf = struct.pack(self._PACK_STR, self.nxt, self.size)
buf = bytearray(buf)
if self.data is None:
self.data = [option(type_=1, len_=4,
data='\x00\x00\x00\x00')]
for opt in self.data:
buf.extend(opt.serialize())
return buf
def __len__(self):
return self._FIX_SIZE + self.size
@ipv6.register_header_type(inet.IPPROTO_HOPOPTS)
class hop_opts(opt_header):
"""IPv6 (RFC 2460) Hop-by-Hop Options header encoder/decoder class.
This is used with ryu.lib.packet.ipv6.ipv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
nxt Next Header
size the length of the Hop-by-Hop Options header,
not include the first 8 octet.
data IPv6 options.
============== =======================================
"""
TYPE = inet.IPPROTO_HOPOPTS
def __init__(self, nxt=inet.IPPROTO_TCP, size=0, data=None):
super(hop_opts, self).__init__(nxt, size, data)
@ipv6.register_header_type(inet.IPPROTO_DSTOPTS)
class dst_opts(opt_header):
"""IPv6 (RFC 2460) destination header encoder/decoder class.
This is used with ryu.lib.packet.ipv6.ipv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
nxt Next Header
size the length of the destination header,
not include the first 8 octet.
data IPv6 options.
============== =======================================
"""
TYPE = inet.IPPROTO_DSTOPTS
def __init__(self, nxt=inet.IPPROTO_TCP, size=0, data=None):
super(dst_opts, self).__init__(nxt, size, data)
class option(stringify.StringifyMixin):
"""IPv6 (RFC 2460) Options header encoder/decoder class.
This is used with ryu.lib.packet.ipv6.hop_opts or
ryu.lib.packet.ipv6.dst_opts.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
type\_ option type.
len\_ the length of data. -1 if type\_ is 0.
data an option value. None if len\_ is 0 or -1.
============== =======================================
"""
_PACK_STR = '!BB'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, type_=0, len_=-1, data=None):
self.type_ = type_
self.len_ = len_
self.data = data
@classmethod
def parser(cls, buf):
(type_, ) = struct.unpack_from('!B', buf)
if not type_:
cls_ = cls(type_, -1, None)
else:
data = None
(type_, len_) = struct.unpack_from(cls._PACK_STR, buf)
if len_:
form = "%ds" % len_
(data, ) = struct.unpack_from(form, buf, cls._MIN_LEN)
cls_ = cls(type_, len_, data)
return cls_
def serialize(self):
data = None
if not self.type_:
data = struct.pack('!B', self.type_)
elif not self.len_:
data = struct.pack(self._PACK_STR, self.type_, self.len_)
else:
form = "%ds" % self.len_
data = struct.pack(self._PACK_STR + form, self.type_,
self.len_, self.data)
return data
def __len__(self):
return self._MIN_LEN + self.len_
@ipv6.register_header_type(inet.IPPROTO_ROUTING)
class routing(header):
"""An IPv6 Routing Header decoder class.
This class has only the parser method.
IPv6 Routing Header types.
http://www.iana.org/assignments/ipv6-parameters/ipv6-parameters.xhtml
+-----------+----------------------------------+-------------------+
| Value | Description | Reference |
+===========+==================================+===================+
| 0 | Source Route (DEPRECATED) | [[IPV6]][RFC5095] |
+-----------+----------------------------------+-------------------+
| 1 | Nimrod (DEPRECATED 2009-05-06) | |
+-----------+----------------------------------+-------------------+
| 2 | Type 2 Routing Header | [RFC6275] |
+-----------+----------------------------------+-------------------+
| 3 | RPL Source Route Header | [RFC6554] |
+-----------+----------------------------------+-------------------+
| 4 - 252 | Unassigned | |
+-----------+----------------------------------+-------------------+
| 253 | RFC3692-style Experiment 1 [2] | [RFC4727] |
+-----------+----------------------------------+-------------------+
| 254 | RFC3692-style Experiment 2 [2] | [RFC4727] |
+-----------+----------------------------------+-------------------+
| 255 | Reserved | |
+-----------+----------------------------------+-------------------+
"""
TYPE = inet.IPPROTO_ROUTING
_OFFSET_LEN = struct.calcsize('!2B')
# IPv6 Routing Header Type
ROUTING_TYPE_2 = 0x02
ROUTING_TYPE_3 = 0x03
@classmethod
def parser(cls, buf):
(type_, ) = struct.unpack_from('!B', buf, cls._OFFSET_LEN)
switch = {
# TODO: make parsers of type2.
cls.ROUTING_TYPE_2: None,
cls.ROUTING_TYPE_3: routing_type3
}
cls_ = switch.get(type_)
if cls_:
return cls_.parser(buf)
else:
return None
class routing_type3(header):
"""
An IPv6 Routing Header for Source Routes with the RPL (RFC 6554)
encoder/decoder class.
This is used with ryu.lib.packet.ipv6.ipv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
nxt Next Header
size The length of the Routing header,
not include the first 8 octet.
(0 means automatically-calculate when encoding)
type Identifies the particular Routing header variant.
seg Number of route segments remaining.
cmpi Number of prefix octets from segments 1 through n-1.
cmpe Number of prefix octets from segment n.
pad Number of octets that are used for padding
after Address[n] at the end of the SRH.
adrs Vector of addresses, numbered 1 to n.
============== =======================================
"""
_PACK_STR = '!BBBBBB2x'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, nxt=inet.IPPROTO_TCP, size=0,
type_=3, seg=0, cmpi=0, cmpe=0, adrs=None):
super(routing_type3, self).__init__(nxt)
self.size = size
self.type_ = type_
self.seg = seg
self.cmpi = cmpi
self.cmpe = cmpe
adrs = adrs or []
assert isinstance(adrs, list)
self.adrs = adrs
self._pad = (8 - ((len(self.adrs) - 1) * (16 - self.cmpi) +
(16 - self.cmpe) % 8)) % 8
@classmethod
def _get_size(cls, size):
return (int(size) + 1) * 8
@classmethod
def parser(cls, buf):
(nxt, size, type_, seg, cmp_, pad) = struct.unpack_from(
cls._PACK_STR, buf)
data = cls._MIN_LEN
header_len = cls._get_size(size)
cmpi = int(cmp_ >> 4)
cmpe = int(cmp_ & 0xf)
pad = int(pad >> 4)
adrs = []
if size:
# Address[1..n-1] has size (16 - CmprI) octets
adrs_len_i = 16 - cmpi
# Address[n] has size (16 - CmprE) octets
adrs_len_e = 16 - cmpe
form_i = "%ds" % adrs_len_i
form_e = "%ds" % adrs_len_e
while data < (header_len - (adrs_len_e + pad)):
(adr, ) = struct.unpack_from(form_i, buf[data:])
adr = ('\x00' * cmpi) + adr
adrs.append(addrconv.ipv6.bin_to_text(adr))
data += adrs_len_i
(adr, ) = struct.unpack_from(form_e, buf[data:])
adr = ('\x00' * cmpe) + adr
adrs.append(addrconv.ipv6.bin_to_text(adr))
return cls(nxt, size, type_, seg, cmpi, cmpe, adrs)
def serialize(self):
if self.size == 0:
self.size = ((len(self.adrs) - 1) * (16 - self.cmpi) +
(16 - self.cmpe) + self._pad) / 8
buf = struct.pack(self._PACK_STR, self.nxt, self.size,
self.type_, self.seg, (self.cmpi << 4) | self.cmpe,
self._pad << 4)
buf = bytearray(buf)
if self.size:
form_i = "%ds" % (16 - self.cmpi)
form_e = "%ds" % (16 - self.cmpe)
slice_i = slice(self.cmpi, 16)
slice_e = slice(self.cmpe, 16)
for adr in self.adrs[:-1]:
buf.extend(
struct.pack(
form_i, addrconv.ipv6.text_to_bin(adr)[slice_i]))
buf.extend(struct.pack(
form_e,
addrconv.ipv6.text_to_bin(self.adrs[-1])[slice_e]))
return buf
def __len__(self):
return routing_type3._get_size(self.size)
@ipv6.register_header_type(inet.IPPROTO_FRAGMENT)
class fragment(header):
"""IPv6 (RFC 2460) fragment header encoder/decoder class.
This is used with ryu.lib.packet.ipv6.ipv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
nxt Next Header
offset offset, in 8-octet units, relative to
the start of the fragmentable part of
the original packet.
more 1 means more fragments follow;
0 means last fragment.
id\_ packet identification value.
============== =======================================
"""
TYPE = inet.IPPROTO_FRAGMENT
_PACK_STR = '!BxHI'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, nxt=inet.IPPROTO_TCP, offset=0, more=0, id_=0):
super(fragment, self).__init__(nxt)
self.offset = offset
self.more = more
self.id_ = id_
@classmethod
def parser(cls, buf):
(nxt, off_m, id_) = struct.unpack_from(cls._PACK_STR, buf)
offset = off_m >> 3
more = off_m & 0x1
return cls(nxt, offset, more, id_)
def serialize(self):
off_m = (self.offset << 3 | self.more)
buf = struct.pack(self._PACK_STR, self.nxt, off_m, self.id_)
return buf
def __len__(self):
return self._MIN_LEN
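    # Illustrative sketch, not part of the original module: the offset field
    # is expressed in 8-octet units, so a fragment starting at byte 1024 of
    # the fragmentable part uses offset=128.
    #
    #   frag = fragment(nxt=inet.IPPROTO_UDP, offset=128, more=1, id_=0x1234)
    #   assert fragment.parser(frag.serialize()).offset == 128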
@ipv6.register_header_type(inet.IPPROTO_AH)
class auth(header):
"""IP Authentication header (RFC 2402) encoder/decoder class.
This is used with ryu.lib.packet.ipv6.ipv6.
An instance has the following attributes at least.
Most of them are same to the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============== =======================================
Attribute Description
============== =======================================
nxt Next Header
size the length of the Authentication Header
in 64-bit words, subtracting 1.
spi security parameters index.
seq sequence number.
data authentication data.
============== =======================================
"""
TYPE = inet.IPPROTO_AH
_PACK_STR = '!BB2xII'
_MIN_LEN = struct.calcsize(_PACK_STR)
def __init__(self, nxt=inet.IPPROTO_TCP, size=2, spi=0, seq=0,
data='\x00\x00\x00\x00'):
super(auth, self).__init__(nxt)
assert data is not None
self.size = size
self.spi = spi
self.seq = seq
self.data = data
@classmethod
def _get_size(cls, size):
return (int(size) + 2) * 4
@classmethod
def parser(cls, buf):
(nxt, size, spi, seq) = struct.unpack_from(cls._PACK_STR, buf)
form = "%ds" % (cls._get_size(size) - cls._MIN_LEN)
(data, ) = struct.unpack_from(form, buf, cls._MIN_LEN)
return cls(nxt, size, spi, seq, data)
def serialize(self):
buf = struct.pack(self._PACK_STR, self.nxt, self.size, self.spi,
self.seq)
buf = bytearray(buf)
form = "%ds" % (auth._get_size(self.size) - self._MIN_LEN)
buf.extend(struct.pack(form, self.data))
return buf
def __len__(self):
return auth._get_size(self.size)
ipv6.set_classes(ipv6._IPV6_EXT_HEADER_TYPE)
|
{
"content_hash": "ff3ecb0c12146a48e0edd441f7b3a643",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 78,
"avg_line_length": 34.707191780821915,
"alnum_prop": 0.49380827865212884,
"repo_name": "noobcoderT/ryu-3.21",
"id": "8d0a82eb5632fe9598c49edb097cd6a79bf4c4c1",
"size": "20882",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "ryu/lib/packet/ipv6.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8269"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "871862"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "5241610"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from corehq.apps.groups.models import Group
from dimagi.utils.decorators.memoized import memoized
from pillowtop.listener import AliasedElasticPillow
from .mappings.group_mapping import GROUP_INDEX, GROUP_MAPPING
from .base import HQPillow
class GroupPillow(HQPillow):
"""
    Simple/Common Group properties Indexer
"""
document_class = Group
couch_filter = "groups/all_groups"
es_index_prefix = "hqgroups"
es_alias = "hqgroups"
es_type = "group"
es_index = GROUP_INDEX
default_mapping = GROUP_MAPPING
|
{
"content_hash": "1c71d8c62171abacf50c9e62f4563d96",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 62,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7386759581881533,
"repo_name": "gmimano/commcaretest",
"id": "40f9f1387e839679ee4202d895e8bab45afa0626",
"size": "574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "corehq/pillows/group.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
}
|
"""The tests for the MQTT discovery."""
from unittest.mock import patch
from homeassistant.components import mqtt
from homeassistant.components.mqtt.discovery import (
ALREADY_DISCOVERED, async_start)
from homeassistant.const import STATE_OFF, STATE_ON
from tests.common import MockConfigEntry, async_fire_mqtt_message, mock_coro
async def test_subscribing_config_topic(hass, mqtt_mock):
"""Test setting up discovery."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker'
})
hass_config = {}
discovery_topic = 'homeassistant'
await async_start(hass, discovery_topic, hass_config, entry)
assert mqtt_mock.async_subscribe.called
call_args = mqtt_mock.async_subscribe.mock_calls[0][1]
assert call_args[0] == discovery_topic + '/#'
assert call_args[2] == 0
async def test_invalid_topic(hass, mqtt_mock):
"""Test sending to invalid topic."""
with patch('homeassistant.components.mqtt.discovery.async_load_platform')\
as mock_load_platform:
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker'
})
mock_load_platform.return_value = mock_coro()
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(
hass, 'homeassistant/binary_sensor/bla/not_config', '{}')
await hass.async_block_till_done()
assert not mock_load_platform.called
async def test_invalid_json(hass, mqtt_mock, caplog):
"""Test sending in invalid JSON."""
with patch('homeassistant.components.mqtt.discovery.async_load_platform')\
as mock_load_platform:
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={
mqtt.CONF_BROKER: 'test-broker'
})
mock_load_platform.return_value = mock_coro()
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'not json')
await hass.async_block_till_done()
assert 'Unable to parse JSON' in caplog.text
assert not mock_load_platform.called
async def test_only_valid_components(hass, mqtt_mock, caplog):
"""Test for a valid component."""
with patch('homeassistant.components.mqtt.discovery.async_load_platform')\
as mock_load_platform:
entry = MockConfigEntry(domain=mqtt.DOMAIN)
invalid_component = "timer"
mock_load_platform.return_value = mock_coro()
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/{}/bla/config'.format(
invalid_component
), '{}')
await hass.async_block_till_done()
assert 'Component {} is not supported'.format(
invalid_component
) in caplog.text
assert not mock_load_platform.called
async def test_correct_config_discovery(hass, mqtt_mock, caplog):
"""Test sending in correct JSON."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Beer'
assert ('binary_sensor', 'bla') in hass.data[ALREADY_DISCOVERED]
async def test_discover_fan(hass, mqtt_mock, caplog):
"""Test discovering an MQTT fan."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/fan/bla/config',
('{ "name": "Beer",'
' "command_topic": "test_topic" }'))
await hass.async_block_till_done()
state = hass.states.get('fan.beer')
assert state is not None
assert state.name == 'Beer'
assert ('fan', 'bla') in hass.data[ALREADY_DISCOVERED]
async def test_discover_climate(hass, mqtt_mock, caplog):
"""Test discovering an MQTT climate component."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "ClimateTest",'
' "current_temperature_topic": "climate/bla/current_temp",'
' "temperature_command_topic": "climate/bla/target_temp" }'
)
async_fire_mqtt_message(hass, 'homeassistant/climate/bla/config', data)
await hass.async_block_till_done()
state = hass.states.get('climate.ClimateTest')
assert state is not None
assert state.name == 'ClimateTest'
assert ('climate', 'bla') in hass.data[ALREADY_DISCOVERED]
async def test_discover_alarm_control_panel(hass, mqtt_mock, caplog):
"""Test discovering an MQTT alarm control panel component."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "AlarmControlPanelTest",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
async_fire_mqtt_message(
hass, 'homeassistant/alarm_control_panel/bla/config', data)
await hass.async_block_till_done()
state = hass.states.get('alarm_control_panel.AlarmControlPanelTest')
assert state is not None
assert state.name == 'AlarmControlPanelTest'
assert ('alarm_control_panel', 'bla') in hass.data[ALREADY_DISCOVERED]
async def test_discovery_incl_nodeid(hass, mqtt_mock, caplog):
"""Test sending in correct JSON with optional node_id included."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/my_node_id/bla'
'/config', '{ "name": "Beer" }')
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
assert state is not None
assert state.name == 'Beer'
assert ('binary_sensor', 'my_node_id bla') in hass.data[ALREADY_DISCOVERED]
async def test_non_duplicate_discovery(hass, mqtt_mock, caplog):
"""Test for a non duplicate component."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/config',
'{ "name": "Beer" }')
await hass.async_block_till_done()
state = hass.states.get('binary_sensor.beer')
state_duplicate = hass.states.get('binary_sensor.beer1')
assert state is not None
assert state.name == 'Beer'
assert state_duplicate is None
assert 'Component has already been discovered: ' \
'binary_sensor bla' in caplog.text
async def test_discovery_expansion(hass, mqtt_mock, caplog):
"""Test expansion of abbreviated discovery payload."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "~": "some/base/topic",'
' "name": "DiscoveryExpansionTest1",'
' "stat_t": "test_topic/~",'
' "cmd_t": "~/test_topic",'
' "dev":{'
' "ids":["5706DF"],'
' "name":"DiscoveryExpansionTest1 Device",'
' "mdl":"Generic",'
' "sw":"1.2.3.4",'
' "mf":"Noone"'
' }'
'}'
)
async_fire_mqtt_message(
hass, 'homeassistant/switch/bla/config', data)
await hass.async_block_till_done()
state = hass.states.get('switch.DiscoveryExpansionTest1')
assert state is not None
assert state.name == 'DiscoveryExpansionTest1'
assert ('switch', 'bla') in hass.data[ALREADY_DISCOVERED]
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, 'test_topic/some/base/topic',
'ON')
state = hass.states.get('switch.DiscoveryExpansionTest1')
assert state.state == STATE_ON
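# Illustrative note, not part of the original tests: the abbreviated payload in
# test_discovery_expansion is expected to be expanded roughly to
#   {"name": "DiscoveryExpansionTest1",
#    "state_topic": "test_topic/some/base/topic",
#    "command_topic": "some/base/topic/test_topic",
#    "device": {"identifiers": ["5706DF"], ...}}
# i.e. "~" is substituted for the base topic inside stat_t/cmd_t and the short
# keys are mapped onto their full configuration names.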
async def test_implicit_state_topic_alarm(hass, mqtt_mock, caplog):
"""Test implicit state topic for alarm_control_panel."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Test1",'
' "command_topic": "homeassistant/alarm_control_panel/bla/cmnd"'
'}'
)
async_fire_mqtt_message(
hass, 'homeassistant/alarm_control_panel/bla/config', data)
await hass.async_block_till_done()
assert (
'implicit state_topic is deprecated, add '
'"state_topic":"homeassistant/alarm_control_panel/bla/state"'
in caplog.text)
state = hass.states.get('alarm_control_panel.Test1')
assert state is not None
assert state.name == 'Test1'
assert ('alarm_control_panel', 'bla') in hass.data[ALREADY_DISCOVERED]
assert state.state == 'unknown'
async_fire_mqtt_message(
hass, 'homeassistant/alarm_control_panel/bla/state', 'armed_away')
state = hass.states.get('alarm_control_panel.Test1')
assert state.state == 'armed_away'
async def test_implicit_state_topic_binary_sensor(hass, mqtt_mock, caplog):
"""Test implicit state topic for binary_sensor."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Test1"'
'}'
)
async_fire_mqtt_message(
hass, 'homeassistant/binary_sensor/bla/config', data)
await hass.async_block_till_done()
assert (
'implicit state_topic is deprecated, add '
'"state_topic":"homeassistant/binary_sensor/bla/state"'
in caplog.text)
state = hass.states.get('binary_sensor.Test1')
assert state is not None
assert state.name == 'Test1'
assert ('binary_sensor', 'bla') in hass.data[ALREADY_DISCOVERED]
assert state.state == 'off'
async_fire_mqtt_message(hass, 'homeassistant/binary_sensor/bla/state',
'ON')
state = hass.states.get('binary_sensor.Test1')
assert state.state == 'on'
async def test_implicit_state_topic_sensor(hass, mqtt_mock, caplog):
"""Test implicit state topic for sensor."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Test1"'
'}'
)
async_fire_mqtt_message(
hass, 'homeassistant/sensor/bla/config', data)
await hass.async_block_till_done()
assert (
'implicit state_topic is deprecated, add '
'"state_topic":"homeassistant/sensor/bla/state"'
in caplog.text)
state = hass.states.get('sensor.Test1')
assert state is not None
assert state.name == 'Test1'
assert ('sensor', 'bla') in hass.data[ALREADY_DISCOVERED]
assert state.state == 'unknown'
async_fire_mqtt_message(hass, 'homeassistant/sensor/bla/state',
'1234')
state = hass.states.get('sensor.Test1')
assert state.state == '1234'
async def test_no_implicit_state_topic_switch(hass, mqtt_mock, caplog):
"""Test no implicit state topic for switch."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, 'homeassistant', {}, entry)
data = (
'{ "name": "Test1",'
' "command_topic": "cmnd"'
'}'
)
async_fire_mqtt_message(
hass, 'homeassistant/switch/bla/config', data)
await hass.async_block_till_done()
assert (
'implicit state_topic is deprecated'
not in caplog.text)
state = hass.states.get('switch.Test1')
assert state is not None
assert state.name == 'Test1'
assert ('switch', 'bla') in hass.data[ALREADY_DISCOVERED]
assert state.state == 'off'
assert state.attributes['assumed_state'] is True
async_fire_mqtt_message(hass, 'homeassistant/switch/bla/state',
'ON')
state = hass.states.get('switch.Test1')
assert state.state == 'off'
|
{
"content_hash": "e28ec36116b173cac3cdd7c085c6bf4b",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 79,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.6345060558622394,
"repo_name": "MartinHjelmare/home-assistant",
"id": "42513a2e9007d21dbf942622bd3b3cb2eef94f60",
"size": "12137",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15222591"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
'''
This module includes the implementation of an object which periodically sends
events and also sends events in response to receiving a particular type of
event.
TODO LIST:
- generating the event using user-defined code does not work yet
- there is some problem with the Scheduler where events are being sent
too quickly.
'''
#--REGULAR IMPORTS-------------------------------------------------------------
from operator import isSequenceType
from traceback import print_exc
from random import randrange
from time import sleep
#--CORBA STUBS-----------------------------------------------------------------
#--ACS Imports-----------------------------------------------------------------
from Acssim.Goodies import supplyEventByType, supplyEventByInstance
from Acspy.Util.Scheduler import Scheduler
from Acssim.Goodies import getComponentXMLObj
from Acssim.Goodies import getCompLocalNSList
from Acssim.Corba.Utilities import listToCodeObj
from Acspy.Common.Log import getLogger
from Acspy.Common.Log import acsPrintExcDebug
from Acspy.Nc.Consumer import Consumer
#--GLOBALS---------------------------------------------------------------------
#a single scheduler will publish all events
SCHEDULER = Scheduler()
#------------------------------------------------------------------------------
class EventDispatcher:
'''
EventDispatcher dispatches events at given frequencies and also sends
    events in response to receiving an event of a particular type.
'''
def __init__(self, comp_ref):
'''
Constructor
Parameters: comp_ref - reference to the component
Raises: ???
'''
#component reference
self.comp_ref = comp_ref
#the component's name which we need for the CDB
self.comp_name = self.comp_ref._get_name()
#our own personal logger
self.logger = getLogger("EventDispatcher (" +
self.comp_name + ")")
#list of all timeouts we have scheduled. used at destruction
self.timeout_ids = []
#maps consumer objects to channels.
self.consumers = {}
#delegate the logic determining when events will be sent
#to this helper method
self.setupEventDispatching()
def destroy(self):
'''
Destroys this object: cancels all timeouts and destroys all consumers.
Params: None
Returns: Nothing
Raises: ???
'''
#first cancel all the scheduled timeouts
for timeout_id in self.timeout_ids:
SCHEDULER.cancelTimeout(timeout_id)
#next destroy all the consumers
for consumer in self.consumers.values():
consumer.disconnect()
def setupEventDispatching(self):
'''
Helper method sets up event dispatching using info found in the
ACS CDB.
'''
self.logger.logInfo("Setting up event dispatching.")
#get the xml object which describes the event frequencies and
#reponses
xml_obj = getComponentXMLObj(self.comp_name)
#sanity check
if xml_obj == None:
self.logger.logDebug("No CDB entry found. Bailing.")
return
#just delegate to other helper methods
self.handleFrequencies(xml_obj)
self.handleResponses(xml_obj)
def handleFrequencies(self, xml_obj):
'''
Helper method used to setup events sent at certain
frequencies.
Params: xml_obj - an XMLObjectifier object conforming to the
SimulatedComponent.xsd schema.
Returns: Nothing
Raises: ???
'''
#events is the somewhat formatted data taken from the XML. not really
#nice enough to work with yet.
try:
events = xml_obj.SimulatedComponent._almaEvent
if isSequenceType(events)==0:
events = [ events ]
except:
self.logger.logDebug("No event frequencies defined.")
return
#cycle through all the events
for event in events:
#extract the channel name
channel_name = event.getAttribute('Channel')
#extract the IFR ID
ifr_id = event.getAttribute('ID')
#extract the rate at which events will be sent at
frequency = float(event.getAttribute('Frequency'))
event_instance = self.eventFunctionHelper(event, ifr_id)
if event_instance != None:
self.scheduleEventByInstance(channel_name, event_instance, frequency)
else:
#this is perfectly OK. end-user did not define a function to
#generate events
self.scheduleEventByType(channel_name, ifr_id, frequency)
def handleResponses(self, xml_obj):
'''
Helper method used to send events in response to receiving events
of a certain type.
Params: xml_obj - an XMLObjectifier object conforming to the
SimulatedComponent.xsd schema.
Returns: Nothing
Raises: ???
'''
#events is the somewhat formatted data taken from the XML. not really
#nice enough to work with yet.
try:
events = xml_obj.SimulatedComponent._almaEventResponse
if isSequenceType(events)==0:
events = [ events ]
except:
self.logger.logDebug("No event frequencies defined.")
return
#cycle through all the events
for event in events:
#extract the incoming channel name
incoming_channel_name = event.getAttribute('IncomingChannel')
#extract the incoming IFR ID
incoming_ifr_id = event.getAttribute('IncomingID')
#extract the outgoing channel name
outgoing_channel_name = event.getAttribute('OutgoingChannel')
#extract the outgoing IFR ID
outgoing_ifr_id = event.getAttribute('OutgoingID')
            #how long to wait before sending the event
delay = float(event.getAttribute('Delay'))
#the chance of any given event not being sent
missed_event_chance = float(event.getAttribute('MissedEventChance'))
self.logger.logInfo("'" + outgoing_ifr_id +
"' events will be sent to the '" +
outgoing_channel_name + "' channel after '" +
str(delay) + "' seconds with a '" +
str(missed_event_chance) +
"' probabily of being skipped when events of type '" +
incoming_ifr_id +
"' are received on the '" +
incoming_channel_name + "' channel.")
#get an instance of the event
event_instance = self.eventFunctionHelper(event, incoming_ifr_id)
#delegate to another helper method
self.responseHelper(incoming_channel_name, incoming_ifr_id,
outgoing_channel_name, outgoing_ifr_id,
delay,
missed_event_chance,
event_instance)
return
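        # Illustrative CDB sketch, not from the original source: the two
        # handlers above read elements shaped roughly like
        #   <_almaEvent Channel="CONTROL_CHANNEL"
        #               ID="IDL:alma/SomeMod/SomeEvent:1.0" Frequency="0.5"/>
        #   <_almaEventResponse IncomingChannel="CONTROL_CHANNEL"
        #                       IncomingID="IDL:alma/SomeMod/SomeEvent:1.0"
        #                       OutgoingChannel="CONTROL_CHANNEL"
        #                       OutgoingID="IDL:alma/SomeMod/OtherEvent:1.0"
        #                       Delay="1.0" MissedEventChance="0.1"/>
        # where the element body may optionally contain Python code returning
        # the event instance (see eventFunctionHelper below).  The IDs and
        # channel names here are made-up examples.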
def eventFunctionHelper(self, event, ifr_id):
'''
Returns an event instance or None based off the contents of
an _almaEvent or _almaEventReponse XML element (DOM).
'''
#here comes the fun part...it might be necessary to dynamically
#create the object now!
#get the code to be executed yielding a return value
try:
#if the following line of code throws an exception,
#it's not really a big deal. it just means that
#no function was defined within the XML element to
#generate the event instance
value = event.getValue().rstrip().lstrip().split('\n')
#this next block is wrapped in a separate try/except
#because it's possible that the end-user has problems
#in their function definition.
try:
_locals = {}
#attach all imports to the function definition
value = getCompLocalNSList(self.comp_name) + value
#make the code list a function in the _locals namespace
value = listToCodeObj(value, _locals)
#create the function
exec value in globals(), _locals
#execute the function as well to get the event instance
exec "joe = stringFunction([])" in globals(), _locals
event_instance = _locals['joe']
except Exception, e:
#the function definition given by the end-user was bad!
#warn them and schedule dynamic events instead
acsPrintExcDebug()
self.logger.logCritical("Something was wrong within the function definition for the '" +
ifr_id +
"' event type.")
self.logger.logInfo("Will try dynamically creating the event instead.")
#just rethrow e so the next block catches it
raise e
except:
event_instance = None
return event_instance
def responseHelper(self, incoming_channel_name, incoming_ifr_id,
outgoing_channel_name, outgoing_ifr_id,
delay, missed_event_chance,
event_instance):
'''
A fairly complex helper method which:
- adds a subscription to a consumer for the incoming_ifr_id
event type
- whenever an event is received by the consumer, an event of
outgoing_ifr_id type is PROBABLY published on the
outgoing_channel_name channel after 'delay' seconds
- there's a chance the event will not be published at all
if 'missed_event_chance' is close to 1.0.
'''
#sanity check to ensure a consumer is around
if not self.consumers.has_key(incoming_channel_name):
#add a consumer
consumer = Consumer(incoming_channel_name)
consumer.consumerReady()
self.consumers[incoming_channel_name] = consumer
#consumer
cons = self.consumers[incoming_channel_name]
#define the event handler method
def eventHandler(data):
'''
This event handler method:
- checks the probability to ensure an event should actually
be published.
- pauses for a set amount of time
- sends another event presumably of a different type
'''
#first check the probability to see if we can
#ignore the call entirely
ran_float = (randrange(0,100))/100.0
if ran_float > missed_event_chance:
#bail
self.logger.logDebug("Randomly skipped an event: " +
str(ran_float))
return
self.logger.logDebug("Publishing a '" + outgoing_ifr_id +
"' event on the '" + outgoing_channel_name +
"' in response to receiving an event of type'" +
incoming_ifr_id + "' on the '" +
incoming_channel_name)
#first we sleep
sleep(delay)
#send an event in response...
if event_instance==None:
supplyEventByType(self.comp_name, outgoing_channel_name, outgoing_ifr_id)
else:
supplyEventByInstance(self.comp_name, outgoing_channel_name, event_instance)
return #ends the function definition
#add the subscription; first getting at the event type
event_type = incoming_ifr_id.split(":")[1].split('/').pop()
#now add the subscription with the function we just defined
cons.addSubscription(event_type, eventHandler)
def scheduleEventByType(self, channel_name, ifr_id, frequency):
'''
Schedules event transmissions by type.
'''
self.logger.logInfo("Sending '" + ifr_id +
"' events on the '" + channel_name + "' channel" +
" at a rate of '" + str(frequency) +
"' per second.")
id = SCHEDULER.scheduleTimeout(supplyEventByType,
0L,
frequency * 10000000.0,
(self.comp_name, channel_name, ifr_id))
self.timeout_ids.append(id)
return
def scheduleEventByInstance(self, channel_name, event_instance, frequency):
'''
        Schedules event transmissions by instance.
'''
self.logger.logInfo("Sending '" + event_instance._NP_RepositoryId +
"' events on the '" + channel_name + "' channel" +
" at a rate of '" + str(frequency) +
"' per second.")
id = SCHEDULER.scheduleTimeout(supplyEventByInstance,
0L,
frequency * 10000000.0,
(self.comp_name, channel_name, event_instance))
self.timeout_ids.append(id)
return
|
{
"content_hash": "d4d9532ae99745e75bf7b905f0e2ed67",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 107,
"avg_line_length": 40.146067415730336,
"alnum_prop": 0.5272180240694094,
"repo_name": "csrg-utfsm/acscb",
"id": "210585a4fbded7090b18d3c596c646eba23c3d5c",
"size": "15475",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acssim/src/Acssim/Corba/EventDispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
}
|
from common.dbhelper import SQLTable
from common import regexes  # NOTE: assumed import path for the is_int() helper used in valid_year()
def get_code_for_title(title, tablename):
return get_instance().get_code_for_title(title, tablename)
def get_title_for_code(code, tablename):
return get_instance().get_title_for_code(code, tablename)
def add_tracker(tablename, mode):
return get_instance().add_tracker(tablename, mode)
# this is a weird way to implement singletons...
# http://www.python.org/workshops/1997-10/proceedings/savikko.html
def get_instance():
try:
single = StaticCodes()
except StaticCodes as s:
single = s
return single
class StaticCodes(Exception):
__static = None
def __init__(self):
if StaticCodes.__static:
raise StaticCodes.__static
self.code_trackers = {}
StaticCodes.__static = self
def get_instance():
return StaticCodes.__static
def add_tracker(self, tablename, mode):
self.code_trackers[tablename] = SectorCodes(tablename, mode).setup()
return self.code_trackers[tablename]
def get_title_for_code(self, code, tablename):
if tablename not in self.code_trackers:
self.add_tracker(tablename, "r")
return self.code_trackers[tablename].get_title_for_code(code)
def get_code_for_title(self, title, tablename):
if tablename not in self.code_trackers:
self.add_tracker(tablename, "r")
return self.code_trackers[tablename].get_code_for_title(title)
class SectorCodes:
def __init__(self, codetablename, mode="r"):
self.mode = mode
self.codetable = SQLTable(
codetablename,
["code", "description"],
["varchar(15)", "varchar(255)"])
self.code_dict = {}
self.reverse_code_dict = {}
self.setup()
def setup(self):
if self.mode == "w":
# invalid codes or codes that we don't want to record
self.code_blacklist = []
# if we want to override the code provided with something
# we make up (or from another set) based on the description
self.manual_codes = {}
self.codetable.create()
# get existing codes from db
for (code, desc) in self.codetable.getall():
self.code_dict[code] = desc
self.reverse_code_dict[desc] = code
return self
# for write mode
def blacklist_code(self, code):
self.code_blacklist.append(code)
if code in self.code_dict:
del self.code_dict[code]
def set_blacklist(self, code_blacklist):
self.code_blacklist = []
for code in code_blacklist:
self.blacklist_code(code)
def curate_code_from_desc(self, desc, code):
self.manual_codes[desc] = code
self.code_dict[code] = desc
self.reverse_code_dict[desc] = code
def add_curated_codes(self, curated_codes):
for (desc, code) in curated_codes.items():
self.curate_code_from_desc(desc, code)
# returns the code used if it was recognized, false otherwise
def set_code(self, code, desc):
if type(code) is str:
code = code.strip()
elif type(code) is float:
code = str(int(code))
if type(desc) is str:
desc = desc.strip()
if desc in self.manual_codes:
code = self.manual_codes[desc]
        if code is None or not len(code):
            # ignore rows with an empty code, whether or not a description is given
            return False
elif code in self.code_blacklist:
return False
if code in self.code_dict and self.code_dict[code] != desc:
# this is to check for blatant differences
print(self.code_dict[code], "=>", desc)
self.code_dict[code] = desc
# there may be more than one description for the same code
self.reverse_code_dict[desc] = code
return code
def has_code(self, code):
return code in self.code_dict
def get_code_for_title(self, desc):
if desc in self.reverse_code_dict:
return self.reverse_code_dict[desc]
def get_title_for_code(self, code):
if self.has_code(code):
return self.code_dict[code]
return False
def update_codes(self):
if self.mode != "w":
raise Exception("SectorCodes created in read-only mode")
self.codetable.truncate()
for code in sorted(self.code_dict.keys()):
desc = self.code_dict[code]
self.codetable.insert([code, desc])
class HybridTableCreator:
def __init__(self, schema):
self.schema = schema
self.io_prefix = "ixi"
self.env_prefix = "env"
self.io_tables = {}
self.env_tables = {}
def new_sector_codes(self, year=None, prefix=None):
if prefix is None:
prefix = self.io_prefix
if year is None:
tablename = "%s.%s_codes" % (self.schema, prefix)
else:
tablename = "%s.%s_codes_%d" % (self.schema, prefix, year)
return add_tracker(tablename, "w")
def valid_year(self, year):
if type(year) is str and not regexes.is_int(year):
raise Exception("invalid year " + str(year))
year = int(year)
if year < 1800 or year > 2050:
raise Exception("invalid year " + str(year))
return year
def add_env_table(self, year, sector_max_length=15,
series_max_length=15):
year = self.valid_year(year)
if year not in self.env_tables:
tablename = "%s.%s_%d" % (self.schema, self.env_prefix, year)
colnames = ["sector", "series", "value"]
coltypes = [
"varchar(%d)" % sector_max_length,
"varchar(%d)" % series_max_length,
"float"
]
self.env_tables[year] = SQLTable(
tablename, colnames, coltypes).create()
self.env_tables[year].truncate()
def insert_env(self, year, sector, series, value):
if sector and series and value != 0:
self.env_tables[year].insert([sector, series, value])
def add_io_table(self, year, sector_max_length=15):
year = self.valid_year(year)
if year not in self.io_tables:
tablename = "%s.%s_%d" % (self.schema, self.io_prefix, year)
colnames = ["from_sector", "to_sector", "value"]
coltypes = [
"varchar(%d)" % sector_max_length,
"varchar(%d)" % sector_max_length,
"float"]
self.io_tables[year] = SQLTable(
tablename, colnames, coltypes).create()
self.io_tables[year].truncate()
def insert_io(self, year, from_sector, to_sector, value):
if from_sector and to_sector and value != 0:
self.io_tables[year].insert([from_sector, to_sector, value])
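# Illustrative sketch (not part of the original module): one way the classes
# above fit together. The schema name, sector codes and sample value are made up,
# and it assumes a working database connection behind common.dbhelper.SQLTable.
def _example_usage():
    creator = HybridTableCreator("example_schema")
    codes = creator.new_sector_codes(year=2005)  # SectorCodes tracker in "w" mode
    codes.set_code("11", "Agriculture")          # register a code/description pair
    codes.update_codes()                         # persist to example_schema.ixi_codes_2005
    creator.add_io_table(2005)
    creator.insert_io(2005, "11", "31", 42.0)    # from_sector, to_sector, value
    # the module-level helpers reuse the shared StaticCodes singleton
    print(get_title_for_code("11", "example_schema.ixi_codes_2005"))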
|
{
"content_hash": "242bac1ef41d806ee4e054db797f1006",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 76,
"avg_line_length": 31.638009049773757,
"alnum_prop": 0.5765160183066361,
"repo_name": "sonya/eea",
"id": "50c860381cf76456b6e8d5d17f623859172f9050",
"size": "7572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/common/parserutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "441743"
},
{
"name": "Shell",
"bytes": "31869"
}
],
"symlink_target": ""
}
|
import jinja2
from flask import Flask
from flask import jsonify
from flask import request
import model
import report_generator
app = Flask(__name__)
app.config['DEBUG'] = True
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
@app.route('/report/view/<username>')
def view_report(username):
user = model.User.get_by_id(username)
if not user:
error = jsonify({'error': 'User "%s" does not exist' % username})
        error.status_code = 400
return error
test_results = model.TestResult.query(ancestor=user.key).fetch()
if not test_results:
error = jsonify({'error': 'User "%s" did not take a test' % username})
        error.status_code = 400
return error
return jsonify({'report':report_generator.GetUserReport(user, test_results)})
@app.route('/report/get_summary')
def get_summary():
# Get all the test results and return them as json. Offload all the
# processing to the client.
response_data = []
results = model.TestResult.query().fetch()
for result in results:
for answer in result.answers:
# exclude "user": it's a Key object that can't be serialized
response_data.append(answer.to_dict(exclude=['user']))
return jsonify({'answers': response_data})
def JsonifyDrilldownData(results, comparator):
response_data = []
for result in results:
for answer in result.answers:
if comparator(answer):
# Exclude "user": it's a Key object that can't be serialized.
datum = answer.to_dict(exclude=['user'])
# Convert it to a string instead.
datum['username'] = str(answer.user.get().name)
response_data.append(datum)
return jsonify({'answers': response_data})
@app.route('/report/drilldown/word/<word>')
def get_drilldown_word(word):
return JsonifyDrilldownData(model.TestResult.query(
model.TestResult.answers.expected==word).fetch(),
lambda answer: answer.expected == word)
@app.route('/report/drilldown/category/<category>')
def get_drilldown_category(category):
return JsonifyDrilldownData(model.TestResult.query(
model.TestResult.answers.category==category).fetch(),
lambda answer: answer.category == category)
@app.route('/report/list_users')
def list_users():
users = model.User.query().order(
model.User.time_created).fetch(projection=[model.User.name])
usernames = [str(user.name) for user in users]
return jsonify({'usernames': usernames})
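# Illustrative sketch (not part of the original module): the routes above can be
# exercised with Flask's built-in test client. This assumes the App Engine
# datastore backing `model` is available and already holds users and results.
def _example_requests():
    import json
    with app.test_client() as client:
        usernames = json.loads(client.get('/report/list_users').data)['usernames']
        if usernames:
            # fetch the generated report for the first known user
            print(json.loads(client.get('/report/view/%s' % usernames[0]).data))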
|
{
"content_hash": "d0538c902aea9caa7bc01875ecfed171",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 30.317073170731707,
"alnum_prop": 0.6938857602574416,
"repo_name": "rekab/papt",
"id": "b3aff2843ed2cb23f6fbfbadbf8d7de4dfeee9d5",
"size": "2486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6553"
},
{
"name": "HTML",
"bytes": "12385"
},
{
"name": "JavaScript",
"bytes": "28768"
},
{
"name": "Python",
"bytes": "33930"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
from xml.etree import ElementTree
from xml.dom import minidom
import json
import string
import re
import sys
import tempfile
def prettify(rawxml, indent=" "):
"""Return a pretty-printed XML string"""
reparsed = minidom.parseString(rawxml)
return reparsed.toprettyxml(indent)
def nodeid(tid):
return tid.replace(".", "_")
DISPLAY_ATTRS = {
# http://www.graphviz.org/content/attrs
# Nodes
"Class": "shape=hexagon,width=2.5",
"AttrOrParam": "shape=box,width=2",
"EnumNode": "shape=octagon",
"EnumType": "shape=doubleoctagon",
# Edges
"Super": "color=red,fontcolor=red"
}
IGNORED_EDGE_TAGS = ["ElementInPackage", "AllowedInBasket", "AxisSpec", "Unit"]
class ImdParser():
def __init__(self, fn):
self._tree = ElementTree.parse(fn)
# Extract default namespace from root e.g.
# {http://www.interlis.ch/INTERLIS2.3}TRANSFER
self._ns = {
'xmlns': re.match(r'^{(.+)}', self._tree.getroot().tag).group(1)}
def models(self):
modelnodes = self._tree.findall(
"xmlns:DATASECTION/xmlns:IlisMeta07.ModelData/xmlns:IlisMeta07.ModelData.Model", self._ns) or []
models = map(
lambda n: n.find("xmlns:Name", self._ns).text, modelnodes)
return models
def gen_empty_transfer(self, version=2.3):
models = self.models()
transfer = """<?xml version="1.0" encoding="UTF-8"?>
<TRANSFER xmlns="http://www.interlis.ch/INTERLIS{version}">
<HEADERSECTION SENDER="ogrtools" VERSION="{version}">
<MODELS>""".format(version=version)
for model in models:
transfer += """<MODEL NAME="{name}"></MODEL>""".format(name=model)
transfer += """</MODELS>
</HEADERSECTION>
<DATASECTION>
</DATASECTION>
</TRANSFER>"""
return transfer
def gen_empty_transfer_file(self, version=2.3):
transfer = self.gen_empty_transfer(version)
__, transferfn = tempfile.mkstemp(suffix='.xtf')
f = open(transferfn, "w")
f.write(transfer)
f.close()
return transferfn
def extract_enums(self):
"""Extract Interlis Enumerations"""
enum_tables = {}
self._enum_types = self._tree.findall(
"xmlns:DATASECTION/xmlns:IlisMeta07.ModelData/xmlns:IlisMeta07.ModelData.EnumType", self._ns)
self._enum_nodes = self._tree.findall(
"xmlns:DATASECTION/xmlns:IlisMeta07.ModelData/xmlns:IlisMeta07.ModelData.EnumNode", self._ns)
if self._enum_nodes is not None:
# Collect parent enums (only leaf nodes have to be added as enums)
parent_nodes = set()
for enumNode in self._enum_nodes:
parent = enumNode.find("xmlns:ParentNode", self._ns)
if parent is not None:
parent_nodes.add(parent.get("REF"))
            # Collect top-level enum nodes (nodes without a ParentNode)
self._top_nodes = {} # top node => [leaf nodes]
for enumNode in self._enum_nodes:
parent = enumNode.find("xmlns:ParentNode", self._ns)
if parent is None:
self._top_nodes[enumNode] = []
            # Collect leaf nodes under each top node
for enumNode in self._enum_nodes:
top_node = self._find_top_node(enumNode)
if enumNode.get("TID") not in parent_nodes:
leafs = self._top_nodes[top_node]
leafs.append(enumNode)
for top_node in self._top_nodes.keys():
enum_table = []
self._collect_enums(top_node, enum_table, 0)
enumTypeName = top_node.find(
"xmlns:EnumType", self._ns).get('REF')
enumTypeName = string.replace(enumTypeName, '.TYPE', '')
enum_tables[enumTypeName] = enum_table
return enum_tables
def _find_top_node(self, enumNode):
# <IlisMeta07.ModelData.EnumNode TID="RoadsExdm2ien.RoadsExtended.RoadSign.Type.TYPE.TOP.prohibition.noparking">
# <Name>noparking</Name>
# <Abstract>false</Abstract>
# <Final>false</Final>
# <ParentNode REF="RoadsExdm2ien.RoadsExtended.RoadSign.Type.TYPE.TOP.prohibition" ORDER_POS="2" />
# </IlisMeta07.ModelData.EnumNode>
if enumNode in self._top_nodes:
return enumNode
else:
parent_tid = enumNode.find("xmlns:ParentNode", self._ns).get('REF')
for node in self._enum_nodes:
if parent_tid == node.get('TID'):
return self._find_top_node(node)
    def _collect_enums(self, top_node, enum_table, idx):
        """Add leaves of top_node to enum_table"""
# Find enum type
enumTypeName = top_node.find("xmlns:EnumType", self._ns).get('REF')
for node in self._enum_types:
if enumTypeName == node.get('TID'):
enumType = node
break
# Handle type inheritance
superRef = enumType.find("xmlns:Super", self._ns)
if superRef is not None:
superTypeName = superRef.get('REF')
for node in self._top_nodes.keys():
if superTypeName == node.find("xmlns:EnumType", self._ns).get('REF'):
idx = self._collect_enums(node, enum_table, idx)
break
        # Add leaves
for enumNode in self._top_nodes[top_node]:
enum_record = {}
enum_record["id"] = idx # str(idx)
idx = idx + 1
enum = string.replace(
enumNode.get("TID"), top_node.get("TID") + '.', '')
enum_record["enum"] = enum
enum_record["enumtxt"] = enum
enum_table.append(enum_record)
return idx
def extract_enums_asgml(self):
"""Extract Interlis Enumerations as GML"""
enum_tables = self.extract_enums()
# GML output
gml = ElementTree.Element('FeatureCollection')
gml.set('xmlns', 'http://ogr.maptools.org/')
gml.set('xmlns:gml', 'http://www.opengis.net/gml')
#<ogr:FeatureCollection
# xmlns:ogr="http://ogr.maptools.org/"
# xmlns:gml="http://www.opengis.net/gml">
enumIdx = 0
for name, defs in enum_tables.items():
# enum name should not be longer than 63 chars, which is PG default name limit
# Nutzungsplanung.Nutzungsplanung.Grundnutzung_Zonenflaeche.Herkunft.TYPE
# -> enumXX_herkunft
enumTypeName = string.rsplit(name, '.', maxsplit=1)[-1]
curEnumName = "enum%d_%s" % (enumIdx, enumTypeName)
enumIdx = enumIdx + 1
for enumdef in defs:
# <gml:featureMember>
# <ogr:Grundzonen__GrundZonenCode__ZonenArt>
# <ogr:value>Dorfkernzone</ogr:value><ogr:id>0</ogr:id>
# </ogr:Grundzonen__GrundZonenCode__ZonenArt>
# </gml:featureMember>
featureMember = ElementTree.SubElement(
gml, "gml:featureMember")
feat = ElementTree.SubElement(featureMember, curEnumName)
id = ElementTree.SubElement(feat, "id")
id.text = str(enumdef['id'])
enum = ElementTree.SubElement(feat, "enum")
enum.text = enumdef['enum']
enumtxt = ElementTree.SubElement(feat, "enumtxt")
enumtxt.text = enumdef['enumtxt']
return ElementTree.tostring(gml, 'utf-8')
def imd_to_dot(self):
"""Generate dot graph from IlisMeta file"""
print "digraph {"
models = self._tree.findall(
"xmlns:DATASECTION/xmlns:IlisMeta07.ModelData", self._ns) or []
modelno = 0
for model in models:
taggroup = {}
bid = nodeid(model.get("BID"))
if bid == 'MODEL_INTERLIS':
continue
print "subgraph {"
modelno = modelno + 1
print "node [style=filled,colorscheme=accent8,fillcolor={}]".format(modelno)
for node in model:
tag = node.tag.replace(
"{http://www.interlis.ch/INTERLIS2.3}IlisMeta07.ModelData.", "")
multistr = ""
multi = node.find("xmlns:Multiplicity", self._ns)
if multi is not None:
min = ""
max = ""
minnode = multi.find("./*/xmlns:Min", self._ns)
if minnode is not None:
min = minnode.text
maxnode = multi.find("./*/xmlns:Max", self._ns)
if maxnode is not None:
max = maxnode.text
multistr = " [{min}..{max}]".format(min=min, max=max)
if node.get("TID"):
tid = nodeid(node.get("TID"))
name = node.find("xmlns:Name", self._ns).text
if tag not in taggroup:
taggroup[tag] = []
taggroup[tag].append(tid)
display_attrs = DISPLAY_ATTRS.get(tag, "")
print tid + ' [label="' + name + "\\n" + tag + multistr + '" ' + display_attrs + "]"
for refnode in node.findall("./*[@REF]"):
reftag = refnode.tag.replace(
"{http://www.interlis.ch/INTERLIS2.3}", "")
orderpos = refnode.get("ORDER_POS")
display_attrs = DISPLAY_ATTRS.get(reftag, "")
if orderpos:
reftag = reftag + "({})".format(orderpos)
if reftag not in IGNORED_EDGE_TAGS:
print tid + " -> " + nodeid(refnode.get("REF")) + ' [label="' + reftag + multistr + '" ' + display_attrs + "]"
else:
relnodes = node.findall("./*[@REF]")
n1 = nodeid(relnodes[0].get("REF"))
#l1 = relnodes[0].tag.replace("{http://www.interlis.ch/INTERLIS2.3}", "")
n2 = nodeid(relnodes[1].get("REF"))
#l2 = relnodes[1].tag.replace("{http://www.interlis.ch/INTERLIS2.3}", "")
# print n1 + " -> " + n2 + ' [headlabel="' + l2 + '"
# taillabel="' + l1 + '" style=dotted]'
orderpos = relnodes[0].get(
"ORDER_POS") or relnodes[1].get("ORDER_POS")
if tag not in IGNORED_EDGE_TAGS:
if orderpos:
tag = tag + "({})".format(orderpos)
print n1 + " -> " + n2 + ' [label="' + tag + multistr + '" style=dotted,color=blue,fontcolor=blue]'
print "{ rank = same; " + ";".join(taggroup["Class"]) + " }"
if "EnumType" in taggroup:
print "{ rank = same; " + ";".join(taggroup["EnumType"]) + " }"
print "}"
print "}"
def main(argv):
output = argv[1]
fn = argv[2]
parser = ImdParser(fn)
if output == 'enumgml':
print prettify(parser.extract_enums_asgml())
elif output == 'enumjson':
enum_tables = parser.extract_enums()
print json.dumps(enum_tables, indent=2)
elif output == 'dot':
#./ogrtools/interlis/ilismeta.py dot tests/data/ili/RoadsExdm2ien.imd | dot -Tsvg >tests/data/ili/RoadsExdm2ien.imd.svg
parser.imd_to_dot()
return 0
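# Illustrative, hypothetical usage (not part of the original script): besides the
# CLI entry point above, ImdParser can also be driven programmatically. The .imd
# path below is the sample model referenced in the comments and may need adjusting.
def example_programmatic_use():
    parser = ImdParser("tests/data/ili/RoadsExdm2ien.imd")
    print parser.models()                               # INTERLIS model names
    print json.dumps(parser.extract_enums(), indent=2)  # enum tables keyed by type name
    print parser.gen_empty_transfer_file()              # path of a minimal empty .xtf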
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "9e532f9c3b2d74c2f54b9b01c38b2148",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 138,
"avg_line_length": 41.40925266903915,
"alnum_prop": 0.526727397731179,
"repo_name": "sourcepole/ogrtools",
"id": "9d8ce7d5a43cb826ea15f2c06812cb6d0483beed",
"size": "11636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ogrtools/interlis/ilismeta.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4124"
},
{
"name": "Makefile",
"bytes": "9489"
},
{
"name": "Python",
"bytes": "332864"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from typing import (
    AbstractSet, Any, AnyStr, Callable, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Sequence, Set, Text, Tuple, TypeVar, Union
)
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core import validators
from django.contrib.sessions.models import Session
from zerver.lib.bugdown import (
BugdownRenderingException,
version as bugdown_version
)
from zerver.lib.cache import (
to_dict_cache_key,
to_dict_cache_key_id,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.message import (
access_message,
MessageDict,
message_to_dict,
render_markdown,
)
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, RealmAlias, \
Subscription, Recipient, Message, Attachment, UserMessage, valid_stream_name, \
Client, DefaultStream, UserPresence, Referral, PushDeviceToken, MAX_SUBJECT_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, get_recipient, get_huddle, \
get_user_profile_by_id, PreregistrationUser, get_display_recipient, \
get_realm, bulk_get_recipients, \
email_allowed_for_realm, email_to_username, display_recipient_cache_key, \
get_user_profile_by_email, get_stream_cache_key, \
UserActivityInterval, get_active_user_dicts_in_realm, get_active_streams, \
realm_filters_for_realm, RealmFilter, receives_offline_notifications, \
ScheduledJob, get_owned_bot_dicts, \
get_old_unclaimed_attachments, get_cross_realm_emails, receives_online_notifications, \
Reaction
from zerver.lib.alert_words import alert_words_in_realm
from zerver.lib.avatar import get_avatar_url, avatar_url
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from importlib import import_module
from django.core.mail import EmailMessage
from django.utils.timezone import now
from confirmation.models import Confirmation
import six
from six.moves import filter
from six.moves import map
from six.moves import range
from six import unichr
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.create_user import random_api_key
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from django.utils import timezone
from zerver.lib.create_user import create_user
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, cache_set_many, \
cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.html_diff import highlight_html_differences
from zerver.lib.alert_words import user_alert_words, add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.push_notifications import num_push_devices_for_user, \
send_apple_push_notification, send_android_push_notification
from zerver.lib.notifications import clear_followup_emails_queue
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.request import JsonableError
from zerver.lib.session_user import get_session_user
from zerver.lib.upload import attachment_url_re, attachment_url_to_path_id, \
claim_attachment, delete_message_image
from zerver.lib.str_utils import NonBinaryStr, force_str
from zerver.tornado.event_queue import request_event_queue, get_user_events, send_event
import DNS
import ujson
import time
import traceback
import re
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
import copy
# This will be used to type annotate parameters of functions that work on both
# str and unicode in Python 2, but only on str in Python 3.
SizedTextIterable = Union[Sequence[Text], AbstractSet[Text]]
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event):
# type: (MutableMapping[str, Any]) -> None
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node()
+ datetime.datetime.now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(force_str(ujson.dumps(event) + u'\n'))
def active_user_ids(realm):
# type: (Realm) -> List[int]
return [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
def can_access_stream_user_ids(stream):
# type: (Stream) -> Set[int]
# return user ids of users who can access the attributes of
# a stream, such as its name/description
if stream.is_public():
return set(active_user_ids(stream.realm))
else:
return private_stream_user_ids(stream)
def private_stream_user_ids(stream):
# type: (Stream) -> Set[int]
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
active=True)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def bot_owner_userids(user_profile):
# type: (UserProfile) -> Sequence[int]
is_private_bot = (
user_profile.default_sending_stream and user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return (user_profile.bot_owner_id,) # TODO: change this to list instead of tuple
else:
return active_user_ids(user_profile.realm)
def realm_user_count(realm):
# type: (Realm) -> int
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def get_topic_history_for_stream(user_profile, recipient):
# type: (UserProfile, Recipient) -> List[Tuple[str, int]]
# We tested the below query on some large prod datasets, and we never
# saw more than 50ms to execute it, so we think that's acceptable,
# but we will monitor it, and we may later optimize it further.
query = '''
SELECT topic, read, count(*)
FROM (
SELECT
("zerver_usermessage"."flags" & 1) as read,
"zerver_message"."subject" as topic,
"zerver_message"."id" as message_id
FROM "zerver_usermessage"
INNER JOIN "zerver_message" ON (
"zerver_usermessage"."message_id" = "zerver_message"."id"
) WHERE (
"zerver_usermessage"."user_profile_id" = %s AND
"zerver_message"."recipient_id" = %s
) ORDER BY "zerver_usermessage"."message_id" DESC
) messages_for_stream
GROUP BY topic, read
ORDER BY max(message_id) desc
'''
cursor = connection.cursor()
cursor.execute(query, [user_profile.id, recipient.id])
rows = cursor.fetchall()
cursor.close()
topic_names = dict() # type: Dict[str, str]
topic_counts = dict() # type: Dict[str, int]
topics = []
for row in rows:
topic_name, read, count = row
if topic_name.lower() not in topic_names:
topic_names[topic_name.lower()] = topic_name
topic_name = topic_names[topic_name.lower()]
if topic_name not in topic_counts:
topic_counts[topic_name] = 0
topics.append(topic_name)
if not read:
topic_counts[topic_name] += count
history = [(topic, topic_counts[topic]) for topic in topics]
return history
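# For illustration only (made-up data): get_topic_history_for_stream returns topics
# ordered by most recent message, each paired with the caller's unread count, e.g.
# [(u'deployment', 3), (u'lunch', 0), (u'design notes', 12)].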
def send_signup_message(sender, signups_stream, user_profile,
internal=False, realm=None):
# type: (UserProfile, Text, UserProfile, bool, Optional[Realm]) -> None
if internal:
# When this is done using manage.py vs. the web interface
internal_blurb = " **INTERNAL SIGNUP** "
else:
internal_blurb = " "
user_count = realm_user_count(user_profile.realm)
# Send notification to realm notifications stream if it exists
# Don't send notification for the first user in a realm
if user_profile.realm.notifications_stream is not None and user_count > 1:
internal_send_message(
sender,
"stream",
user_profile.realm.notifications_stream.name,
"New users", "%s just signed up for Zulip. Say hello!" % (
user_profile.full_name,),
realm=user_profile.realm)
internal_send_message(
sender,
"stream",
signups_stream,
user_profile.realm.domain,
"%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
user_profile.full_name,
user_profile.email,
internal_blurb,
user_count,
)
)
def notify_new_user(user_profile, internal=False):
# type: (UserProfile, bool) -> None
if settings.NEW_USER_BOT is not None:
send_signup_message(settings.NEW_USER_BOT, "signups", user_profile, internal)
statsd.gauge("users.signups.%s" % (user_profile.realm.domain.replace('.', '_')), 1, delta=True)
def add_new_user_history(user_profile, streams):
# type: (UserProfile, Iterable[Stream]) -> None
"""Give you the last 100 messages on your public streams, so you have
something to look at in your home view once you finish the
tutorial."""
one_week_ago = now() - datetime.timedelta(weeks=1)
recipients = Recipient.objects.filter(type=Recipient.STREAM,
type_id__in=[stream.id for stream in streams
if not stream.invite_only])
recent_messages = Message.objects.filter(recipient_id__in=recipients,
pub_date__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list('id', flat=True)[0:100]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id", flat=True))
ums_to_create = [UserMessage(user_profile=user_profile, message_id=message_id,
flags=UserMessage.flags.read)
for message_id in message_ids_to_use
if message_id not in already_ids]
UserMessage.objects.bulk_create(ums_to_create)
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile, prereg_user=None, newsletter_data=None):
# type: (UserProfile, Optional[PreregistrationUser], Optional[Dict[str, str]]) -> None
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
try:
streams = prereg_user.streams.all()
except AttributeError:
# This will catch both the case where prereg_user is None and where it
# is a MitUser.
streams = []
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
bulk_add_subscriptions(streams, [user_profile])
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
and settings.NOTIFICATION_BOT is not None:
# This is a cross-realm private message.
internal_send_message(
settings.NOTIFICATION_BOT,
"private",
prereg_user.referred_by.email,
user_profile.realm.domain,
"%s <`%s`> accepted your invitation to join Zulip!" % (
user_profile.full_name,
user_profile.email,
)
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
id=prereg_user.id).update(status=0)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)
notify_new_user(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.email,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile):
# type: (UserProfile) -> None
event = dict(type="realm_user", op="add",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=user_profile.is_realm_admin,
full_name=user_profile.full_name,
is_bot=user_profile.is_bot))
send_event(event, active_user_ids(user_profile.realm))
def notify_created_bot(user_profile):
# type: (UserProfile) -> None
def stream_name(stream):
# type: (Stream) -> Optional[Text]
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
event = dict(type="realm_bot", op="add",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
api_key=user_profile.api_key,
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
owner=user_profile.bot_owner.email,
))
send_event(event, bot_owner_userids(user_profile))
def do_create_user(email, password, realm, full_name, short_name,
active=True, bot_type=None, bot_owner=None, tos_version=None,
avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream=None, default_events_register_stream=None,
default_all_public_streams=None, prereg_user=None,
newsletter_data=None):
# type: (Text, Text, Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, Optional[Stream], Optional[Stream], bool, Optional[PreregistrationUser], Optional[Dict[str, str]]) -> UserProfile
event = {'type': 'user_created',
'timestamp': time.time(),
'full_name': full_name,
'short_name': short_name,
'user': email,
'domain': realm.domain,
'bot': bool(bot_type)}
if bot_type:
event['bot_owner'] = bot_owner.email
log_event(event)
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name, short_name=short_name,
active=active, bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
notify_created_user(user_profile)
if bot_type:
notify_created_bot(user_profile)
else:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data)
return user_profile
def user_sessions(user_profile):
# type: (UserProfile) -> List[Session]
return [s for s in Session.objects.all()
if get_session_user(s) == user_profile.id]
def delete_session(session):
# type: (Session) -> None
session_engine.SessionStore(session.session_key).delete() # type: ignore # import_module
def delete_user_sessions(user_profile):
# type: (UserProfile) -> None
for session in Session.objects.all():
if get_session_user(session) == user_profile.id:
delete_session(session)
def delete_realm_user_sessions(realm):
# type: (Realm) -> None
realm_user_ids = [user_profile.id for user_profile in
UserProfile.objects.filter(realm=realm)]
for session in Session.objects.filter(expire_date__gte=datetime.datetime.now()):
if get_session_user(session) in realm_user_ids:
delete_session(session)
def delete_all_user_sessions():
# type: () -> None
for session in Session.objects.all():
delete_session(session)
def delete_all_deactivated_user_sessions():
# type: () -> None
for session in Session.objects.all():
user_profile_id = get_session_user(session)
if user_profile_id is None:
continue
user_profile = get_user_profile_by_id(user_profile_id)
if not user_profile.is_active or user_profile.realm.deactivated:
logging.info("Deactivating session for deactivated user %s" % (user_profile.email,))
delete_session(session)
def active_humans_in_realm(realm):
# type: (Realm) -> Sequence[UserProfile]
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_name(realm, name):
# type: (Realm, Text) -> None
realm.name = name
realm.save(update_fields=['name'])
event = dict(
type="realm",
op="update",
property='name',
value=name,
)
send_event(event, active_user_ids(realm))
def do_set_realm_restricted_to_domain(realm, restricted):
# type: (Realm, bool) -> None
realm.restricted_to_domain = restricted
realm.save(update_fields=['restricted_to_domain'])
event = dict(
type="realm",
op="update",
property='restricted_to_domain',
value=restricted,
)
send_event(event, active_user_ids(realm))
def do_set_realm_invite_required(realm, invite_required):
# type: (Realm, bool) -> None
realm.invite_required = invite_required
realm.save(update_fields=['invite_required'])
event = dict(
type="realm",
op="update",
property='invite_required',
value=invite_required,
)
send_event(event, active_user_ids(realm))
def do_set_realm_invite_by_admins_only(realm, invite_by_admins_only):
# type: (Realm, bool) -> None
realm.invite_by_admins_only = invite_by_admins_only
realm.save(update_fields=['invite_by_admins_only'])
event = dict(
type="realm",
op="update",
property='invite_by_admins_only',
value=invite_by_admins_only,
)
send_event(event, active_user_ids(realm))
def do_set_realm_authentication_methods(realm, authentication_methods):
# type: (Realm, Dict[str, bool]) -> None
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=realm.authentication_methods_dict())
)
send_event(event, active_user_ids(realm))
def do_set_realm_create_stream_by_admins_only(realm, create_stream_by_admins_only):
# type: (Realm, bool) -> None
realm.create_stream_by_admins_only = create_stream_by_admins_only
realm.save(update_fields=['create_stream_by_admins_only'])
event = dict(
type="realm",
op="update",
property='create_stream_by_admins_only',
value=create_stream_by_admins_only,
)
send_event(event, active_user_ids(realm))
def do_set_realm_add_emoji_by_admins_only(realm, add_emoji_by_admins_only):
# type: (Realm, bool) -> None
realm.add_emoji_by_admins_only = add_emoji_by_admins_only
realm.save(update_fields=['add_emoji_by_admins_only'])
event = dict(
type="realm",
op="update",
property='add_emoji_by_admins_only',
value=add_emoji_by_admins_only,
)
send_event(event, active_user_ids(realm))
def do_set_realm_message_editing(realm, allow_message_editing, message_content_edit_limit_seconds):
# type: (Realm, bool, int) -> None
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.save(update_fields=['allow_message_editing', 'message_content_edit_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds),
)
send_event(event, active_user_ids(realm))
def do_set_realm_default_language(realm, default_language):
# type: (Realm, Text) -> None
if default_language == 'zh_CN':
# NB: remove this once we upgrade to Django 1.9
# zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
# Django 1.9
default_language = 'zh_HANS'
realm.default_language = default_language
realm.save(update_fields=['default_language'])
event = dict(
type="realm",
op="update",
property="default_language",
value=default_language
)
send_event(event, active_user_ids(realm))
def do_set_realm_waiting_period_threshold(realm, threshold):
# type: (Realm, int) -> None
realm.waiting_period_threshold = threshold
realm.save(update_fields=['waiting_period_threshold'])
event = dict(
type="realm",
op="update",
property='waiting_period_threshold',
value=threshold,
)
send_event(event, active_user_ids(realm))
def do_deactivate_realm(realm):
# type: (Realm) -> None
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
def do_reactivate_realm(realm):
# type: (Realm) -> None
realm.deactivated = False
realm.save(update_fields=["deactivated"])
def do_deactivate_user(user_profile, log=True, _cascade=True):
# type: (UserProfile, bool, bool) -> None
if not user_profile.is_active:
return
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
if log:
log_event({'type': 'user_deactivated',
'timestamp': time.time(),
'user': user_profile.email,
'domain': user_profile.realm.domain})
event = dict(type="realm_user", op="remove",
person=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(event, active_user_ids(user_profile.realm))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(event, bot_owner_userids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, _cascade=False)
def do_deactivate_stream(stream, log=True):
# type: (Stream, bool) -> None
user_profiles = UserProfile.objects.filter(realm=stream.realm)
for user_profile in user_profiles:
bulk_remove_subscriptions([user_profile], [stream])
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
existing_deactivated_stream = get_stream(new_name, stream.realm)
if existing_deactivated_stream:
            # This stream has already been deactivated, keep prepending !s until
            # we have a unique stream name or we've hit a rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
# If you don't have a unique name at this point, this will fail later in the
# code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save()
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm)
cache_delete(old_cache_key)
if not was_invite_only:
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(event, active_user_ids(stream.realm))
def do_change_user_email(user_profile, new_email):
# type: (UserProfile, Text) -> None
old_email = user_profile.email
user_profile.email = new_email
user_profile.save(update_fields=["email"])
log_event({'type': 'user_email_changed',
'old_email': old_email,
'new_email': new_email})
def compute_irc_user_fullname(email):
# type: (NonBinaryStr) -> NonBinaryStr
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email):
# type: (NonBinaryStr) -> NonBinaryStr
return email.split("@")[0] + " (XMPP)"
def compute_mit_user_fullname(email):
# type: (NonBinaryStr) -> NonBinaryStr
try:
# Input is either e.g. username@mit.edu or user|CROSSREALM.INVALID@mit.edu
match_user = re.match(r'^([a-zA-Z0-9_.-]+)(\|.+)?@mit\.edu$', email.lower())
if match_user and match_user.group(2) is None:
answer = DNS.dnslookup(
"%s.passwd.ns.athena.mit.edu" % (match_user.group(1),),
DNS.Type.TXT)
hesiod_name = force_str(answer[0][0]).split(':')[4].split(',')[0].strip()
if hesiod_name != "":
return hesiod_name
elif match_user:
return match_user.group(1).lower() + "@" + match_user.group(2).upper()[1:]
except DNS.Base.ServerError:
pass
except:
print("Error getting fullname for %s:" % (email,))
traceback.print_exc()
return email.lower()
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm, email, email_to_fullname):
# type: (Realm, Text, Callable[[Text], Text]) -> UserProfile
try:
return get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(email, None, realm,
email_to_fullname(email), email_to_username(email),
active=False, is_mirror_dummy=True)
except IntegrityError:
return get_user_profile_by_email(email)
def log_message(message):
# type: (Message) -> None
if not message.sending_client.name.startswith("test:"):
log_event(message.to_log_dict())
# Helper function. Defaults here are overridden by those set in do_send_messages
def do_send_message(message, rendered_content = None, no_log = False, stream = None, local_id = None):
# type: (Union[int, Message], Optional[Text], bool, Optional[Stream], Optional[int]) -> int
return do_send_messages([{'message': message,
'rendered_content': rendered_content,
'no_log': no_log,
'stream': stream,
'local_id': local_id}])[0]
def render_incoming_message(message, content, message_users):
# type: (Message, Text, Set[UserProfile]) -> Text
realm_alert_words = alert_words_in_realm(message.get_realm())
try:
rendered_content = render_markdown(
message=message,
content=content,
realm_alert_words=realm_alert_words,
message_users=message_users,
)
except BugdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
def get_recipient_user_profiles(recipient, sender_id):
    # type: (Recipient, int) -> List[UserProfile]
if recipient.type == Recipient.PERSONAL:
recipients = list(set([get_user_profile_by_id(recipient.type_id),
get_user_profile_by_id(sender_id)]))
# For personals, you send out either 1 or 2 copies, for
# personals to yourself or to someone else, respectively.
assert((len(recipients) == 1) or (len(recipients) == 2))
elif (recipient.type == Recipient.STREAM or recipient.type == Recipient.HUDDLE):
# We use select_related()/only() here, while the PERSONAL case above uses
# get_user_profile_by_id() to get UserProfile objects from cache. Streams will
# typically have more recipients than PMs, so get_user_profile_by_id() would be
# a bit more expensive here, given that we need to hit the DB anyway and only
# care about the email from the user profile.
fields = [
'user_profile__id',
'user_profile__email',
'user_profile__enable_online_push_notifications',
'user_profile__is_active',
'user_profile__realm__domain'
]
query = Subscription.objects.select_related("user_profile", "user_profile__realm").only(*fields).filter(
recipient=recipient, active=True)
recipients = [s.user_profile for s in query]
else:
raise ValueError('Bad recipient type')
return recipients
def do_send_messages(messages):
# type: (Sequence[Optional[MutableMapping[str, Any]]]) -> List[int]
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids = [] # type: List[int]
new_messages = [] # type: List[MutableMapping[str, Any]]
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
# For consistency, changes to the default values for these gets should also be applied
# to the default args in do_send_message
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['no_log'] = message.get('no_log', False)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
# Log the message to our message log for populate_db to refill
for message in messages:
if not message['no_log']:
log_message(message['message'])
for message in messages:
message['recipients'] = get_recipient_user_profiles(message['message'].recipient,
message['message'].sender_id)
# Only deliver the message to active user recipients
message['active_recipients'] = [user_profile for user_profile in message['recipients']
if user_profile.is_active]
links_for_embed = set() # type: Set[Text]
# Render our messages.
for message in messages:
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message_users=message['active_recipients'])
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = bugdown_version
links_for_embed |= message['message'].links_for_preview
for message in messages:
message['message'].update_calculated_fields()
# Save the message receipts in the database
user_message_flags = defaultdict(dict) # type: Dict[int, Dict[int, List[str]]]
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
ums = [] # type: List[UserMessage]
for message in messages:
ums_to_create = [UserMessage(user_profile=user_profile, message=message['message'])
for user_profile in message['active_recipients']]
# These properties on the Message are set via
# render_markdown by code in the bugdown inline patterns
wildcard = message['message'].mentions_wildcard
mentioned_ids = message['message'].mentions_user_ids
ids_with_alert_words = message['message'].user_ids_with_alert_words
is_me_message = message['message'].is_me_message
for um in ums_to_create:
if um.user_profile.id == message['message'].sender.id and \
message['message'].sent_by_human():
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if is_me_message:
um.flags |= UserMessage.flags.is_me_message
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(ums_to_create)
UserMessage.objects.bulk_create(ums)
# Claim attachments in message
for message in messages:
if Message.content_has_attachment(message['message'].content):
do_claim_attachments(message['message'])
for message in messages:
# Render Markdown etc. here and store (automatically) in
# remote cache, so that the single-threaded Tornado server
# doesn't have to.
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
user_presences = get_status_dict(sender)
presences = {}
for user_profile in message['active_recipients']:
if user_profile.email in user_presences:
presences[user_profile.id] = user_presences[user_profile.email]
event = dict(
type = 'message',
message = message['message'].id,
message_dict_markdown = message_to_dict(message['message'], apply_markdown=True),
message_dict_no_markdown = message_to_dict(message['message'], apply_markdown=False),
presences = presences)
users = [{'id': user.id,
'flags': user_flags.get(user.id, []),
'always_push_notify': user.enable_online_push_notifications}
for user in message['active_recipients']]
if message['message'].recipient.type == Recipient.STREAM:
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
if message['stream'] is None:
message['stream'] = Stream.objects.select_related("realm").get(id=message['message'].recipient.type_id)
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(event, users)
if settings.INLINE_URL_EMBED_PREVIEW and links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data, lambda x: None)
if (settings.ENABLE_FEEDBACK and
message['message'].recipient.type == Recipient.PERSONAL and
settings.FEEDBACK_BOT in [up.email for up in message['recipients']]):
queue_json_publish(
'feedback_messages',
message_to_dict(message['message'], apply_markdown=False),
lambda x: None
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
def do_add_reaction(user_profile, message, emoji_name):
# type: (UserProfile, Message, Text) -> None
reaction = Reaction(user_profile=user_profile, message=message, emoji_name=emoji_name)
reaction.save()
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event = {'type': 'reaction',
'op': 'add',
'user': user_dict,
'message_id': message.id,
'emoji_name': emoji_name} # type: Dict[str, Any]
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
ums = UserMessage.objects.filter(message=message.id)
send_event(event, [um.user_profile_id for um in ums])
def do_remove_reaction(user_profile, message, emoji_name):
# type: (UserProfile, Message, Text) -> None
Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).delete()
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name} # type: Dict[str, Any]
event = {'type': 'reaction',
'op': 'remove',
'user': user_dict,
'message_id': message.id,
'emoji_name': emoji_name} # type: Dict[str, Any]
# Clear the cached message since reaction is removed.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
ums = UserMessage.objects.filter(message=message.id)
send_event(event, [um.user_profile_id for um in ums])
def do_send_typing_notification(notification):
# type: (Dict[str, Any]) -> None
recipient_user_profiles = get_recipient_user_profiles(notification['recipient'],
notification['sender'].id)
# Only deliver the notification to active user recipients
user_ids_to_notify = [profile.id for profile in recipient_user_profiles if profile.is_active]
sender_dict = {'user_id': notification['sender'].id, 'email': notification['sender'].email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email} for profile in recipient_user_profiles]
event = dict(
type = 'typing',
op = notification['op'],
sender = sender_dict,
recipients = recipient_dicts)
send_event(event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender, notification_to, operator):
# type: (UserProfile, Sequence[Text], Text) -> None
typing_notification = check_typing_notification(sender, notification_to, operator)
do_send_typing_notification(typing_notification)
# check_typing_notification:
# Returns typing notification ready for sending with do_send_typing_notification on success
# or the error message (string) on error.
def check_typing_notification(sender, notification_to, operator):
# type: (UserProfile, Sequence[Text], Text) -> Dict[str, Any]
if len(notification_to) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
else:
try:
recipient = recipient_for_emails(notification_to, False,
sender, sender)
except ValidationError as e:
assert isinstance(e.messages[0], six.string_types)
raise JsonableError(e.messages[0])
if recipient.type == Recipient.STREAM:
raise ValueError('Forbidden recipient type')
return {'sender': sender, 'recipient': recipient, 'op': operator}
def do_create_stream(realm, stream_name):
# type: (Realm, Text) -> None
# This is used by a management command now, mostly to facilitate testing. It
# doesn't simulate every single aspect of creating a subscription; for example,
# we don't send Zulips to users to tell them they have been subscribed.
stream = Stream()
stream.realm = realm
stream.name = stream_name
stream.save()
Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
subscribers = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
bulk_add_subscriptions([stream], subscribers)
def create_stream_if_needed(realm, stream_name, invite_only=False, stream_description = ""):
# type: (Realm, Text, bool, Text) -> Tuple[Stream, bool]
(stream, created) = Stream.objects.get_or_create(
realm=realm, name__iexact=stream_name,
defaults={'name': stream_name,
'description': stream_description,
'invite_only': invite_only})
if created:
Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
if not invite_only:
event = dict(type="stream", op="create",
streams=[stream.to_dict()])
send_event(event, active_user_ids(realm))
return stream, created
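# For illustration: because create_stream_if_needed above matches on
# name__iexact, asking for "Denmark" when a stream named "denmark" already
# exists returns that existing stream with created=False; the defaults dict
# (requested name casing, description, invite_only) is only applied when a
# new row is actually created.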
def create_streams_if_needed(realm, stream_dicts):
# type: (Realm, List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams = [] # type: List[Stream]
existing_streams = [] # type: List[Stream]
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
stream_description=stream_dict.get("description", ""))
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
def recipient_for_emails(emails, not_forged_mirror_message,
user_profile, sender):
# type: (Iterable[Text], bool, UserProfile, UserProfile) -> Recipient
recipient_profile_ids = set()
# We exempt cross-realm bots from the check that all the recipients
# are in the same domain.
realm_domains = set()
exempt_emails = get_cross_realm_emails()
if sender.email not in exempt_emails:
realm_domains.add(sender.realm.domain)
for email in emails:
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
raise ValidationError(_("Invalid email '%s'") % (email,))
if (not user_profile.is_active and not user_profile.is_mirror_dummy) or \
user_profile.realm.deactivated:
raise ValidationError(_("'%s' is no longer using Zulip.") % (email,))
recipient_profile_ids.add(user_profile.id)
if email not in exempt_emails:
realm_domains.add(user_profile.realm.domain)
if not_forged_mirror_message and user_profile.id not in recipient_profile_ids:
raise ValidationError(_("User not authorized for this query"))
if len(realm_domains) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if (len(recipient_profile_ids) == 2
and sender.id in recipient_profile_ids):
recipient_profile_ids.remove(sender.id)
if len(recipient_profile_ids) > 1:
# Make sure the sender is included in huddle messages
recipient_profile_ids.add(sender.id)
huddle = get_huddle(list(recipient_profile_ids))
return get_recipient(Recipient.HUDDLE, huddle.id)
else:
return get_recipient(Recipient.PERSONAL, list(recipient_profile_ids)[0])
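# For illustration (hypothetical addresses): if alice@zulip.com sends to
# ['alice@zulip.com', 'bob@zulip.com'], the sender is dropped from the
# two-person set and the result is a PERSONAL recipient for bob; if she sends
# to ['bob@zulip.com', 'cordelia@zulip.com'], the sender is added back in and
# the result is a HUDDLE recipient covering all three users.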
def already_sent_mirrored_message_id(message):
# type: (Message) -> Optional[int]
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
messages = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
subject=message.subject,
sending_client=message.sending_client,
pub_date__gte=message.pub_date - time_window,
pub_date__lte=message.pub_date + time_window)
if messages.exists():
return messages[0].id
return None
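# For illustration: two zephyr_mirror processes relaying the same huddle
# message may stamp it with pub_dates a few seconds apart; as long as the
# copies fall within the 10-second window above, the second copy finds the
# first one's id here instead of producing a duplicate row.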
def extract_recipients(s):
# type: (Union[str, Iterable[Text]]) -> List[Text]
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = ujson.loads(s) # type: ignore # This function has a super weird union argument.
except ValueError:
data = s
if isinstance(data, six.string_types):
data = data.split(',') # type: ignore # https://github.com/python/typeshed/pull/138
if not isinstance(data, list):
raise ValueError("Invalid data type for recipients")
recipients = data
# Strip recipients, and then remove any duplicates and any that
# are the empty string after being stripped.
recipients = [recipient.strip() for recipient in recipients]
return list(set(recipient for recipient in recipients if recipient))
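# For illustration (hypothetical addresses): extract_recipients above accepts
# either a JSON-encoded list or a comma-separated string, so both
#   '["alice@zulip.com", "bob@zulip.com"]'
#   'alice@zulip.com, bob@zulip.com,alice@zulip.com, '
# yield ['alice@zulip.com', 'bob@zulip.com'] (order not guaranteed, since
# duplicates and empty entries are removed via a set).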
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, realm=None, forged=False,
forged_timestamp=None, forwarder_user_profile=None, local_id=None,
sender_queue_id=None):
# type: (UserProfile, Client, Text, Sequence[Text], Text, Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> int
message = check_message(sender, client, message_type_name, message_to,
subject_name, message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id)
return do_send_messages([message])[0]
def check_stream_name(stream_name):
# type: (Text) -> None
if stream_name == "":
raise JsonableError(_("Stream can't be empty"))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name too long"))
if not valid_stream_name(stream_name):
raise JsonableError(_("Invalid stream name"))
def send_pm_if_empty_stream(sender, stream, stream_name, realm):
# type: (UserProfile, Stream, Text, Realm) -> None
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
if stream is not None:
num_subscribers = stream.num_subscribers()
if num_subscribers > 0:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone.now() - last_reminder <= waitperiod:
return
if stream is None:
error_msg = "that stream does not yet exist. To create it, "
else:
# num_subscribers == 0
error_msg = "there are no subscribers to that stream. To join it, "
content = ("Hi there! We thought you'd like to know that your bot **%s** just "
"tried to send a message to stream `%s`, but %s"
"click the gear in the left-side stream list." %
(sender.full_name, stream_name, error_msg))
message = internal_prep_message(settings.NOTIFICATION_BOT, "private",
sender.bot_owner.email, "", content)
do_send_messages([message])
sender.last_reminder = timezone.now()
sender.save(update_fields=['last_reminder'])
# check_message:
# Returns a message dict ready for sending with do_send_messages;
# raises JsonableError on error.
def check_message(sender, client, message_type_name, message_to,
subject_name, message_content, realm=None, forged=False,
forged_timestamp=None, forwarder_user_profile=None, local_id=None,
sender_queue_id=None):
# type: (UserProfile, Client, Text, Sequence[Text], Text, Text, Optional[Realm], bool, Optional[float], Optional[UserProfile], Optional[Text], Optional[Text]) -> Dict[str, Any]
stream = None
if not message_to and message_type_name == 'stream' and sender.default_sending_stream:
        # Use the user's default stream
message_to = [sender.default_sending_stream.name]
elif len(message_to) == 0:
raise JsonableError(_("Message must have recipients"))
if len(message_content.strip()) == 0:
raise JsonableError(_("Message must not be empty"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if message_type_name == 'stream':
if len(message_to) > 1:
raise JsonableError(_("Cannot send to multiple streams"))
stream_name = message_to[0].strip()
check_stream_name(stream_name)
if subject_name is None:
raise JsonableError(_("Missing topic"))
subject = subject_name.strip()
if subject == "":
raise JsonableError(_("Topic can't be empty"))
subject = truncate_topic(subject)
## FIXME: Commented out temporarily while we figure out what we want
# if not valid_stream_name(subject):
# return json_error(_("Invalid subject name"))
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(sender, stream, stream_name, realm)
if stream is None:
raise JsonableError(_("Stream '%(stream_name)s' does not exist") % {'stream_name': escape(stream_name)})
recipient = get_recipient(Recipient.STREAM, stream.id)
if not stream.invite_only:
# This is a public stream
pass
elif subscribed_to_stream(sender, stream):
            # Or it is private, but you are subscribed
pass
elif sender.is_api_super_user or (forwarder_user_profile is not None and
forwarder_user_profile.is_api_super_user):
# Or this request is being done on behalf of a super user
pass
elif sender.is_bot and subscribed_to_stream(sender.bot_owner, stream):
# Or you're a bot and your owner is subscribed.
pass
else:
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
elif message_type_name == 'private':
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]
not_forged_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_emails(message_to, not_forged_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], six.string_types)
raise JsonableError(e.messages[0])
else:
raise JsonableError(_("Invalid message type"))
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if message_type_name == 'stream':
message.subject = subject
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.pub_date = timestamp_to_datetime(forged_timestamp)
else:
message.pub_date = timezone.now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
return {'message': message, 'stream': stream, 'local_id': local_id, 'sender_queue_id': sender_queue_id}
def internal_prep_message(sender_email, recipient_type_name, recipients,
subject, content, realm=None):
# type: (Text, str, Text, Text, Text, Optional[Realm]) -> Optional[Dict[str, Any]]
"""
Create a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
sender = get_user_profile_by_email(sender_email)
if realm is None:
realm = sender.realm
parsed_recipients = extract_recipients(recipients)
if recipient_type_name == "stream":
stream, _ = create_stream_if_needed(realm, parsed_recipients[0])
try:
return check_message(sender, get_client("Internal"), recipient_type_name,
parsed_recipients, subject, content, realm)
except JsonableError as e:
logging.error("Error queueing internal message by %s: %s" % (sender_email, str(e)))
return None
def internal_send_message(sender_email, recipient_type_name, recipients,
subject, content, realm=None):
# type: (Text, str, Text, Text, Text, Optional[Realm]) -> None
msg = internal_prep_message(sender_email, recipient_type_name, recipients,
subject, content, realm)
# internal_prep_message encountered an error
if msg is None:
return
do_send_messages([msg])
def pick_color(user_profile):
# type: (UserProfile) -> Text
subs = Subscription.objects.filter(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM)
return pick_color_helper(user_profile, subs)
def pick_color_helper(user_profile, subs):
# type: (UserProfile, Iterable[Subscription]) -> Text
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
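# For illustration: pick_color_helper above hands out the first palette color
# not already used by the user's active subscriptions; once every color is in
# use, it wraps around deterministically, e.g. with an assumed 24-color
# palette the 25th subscription gets STREAM_ASSIGNMENT_COLORS[24 % 24], i.e.
# the first color again (the palette size here is only for the example).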
def get_subscription(stream_name, user_profile):
# type: (Text, UserProfile) -> Subscription
stream = get_stream(stream_name, user_profile.realm)
recipient = get_recipient(Recipient.STREAM, stream.id)
return Subscription.objects.get(user_profile=user_profile,
recipient=recipient, active=True)
def validate_user_access_to_subscribers(user_profile, stream):
# type: (Optional[UserProfile], Stream) -> None
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
    * The realm is a Zephyr mirror realm (e.g. MIT) and the stream is not invite-only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm__domain": stream.realm.domain,
"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda: subscribed_to_stream(user_profile, stream))
def validate_user_access_to_subscribers_helper(user_profile, stream_dict, check_user_subscribed):
# type: (Optional[UserProfile], Mapping[str, Any], Callable[[], bool]) -> None
""" Helper for validate_user_access_to_subscribers that doesn't require a full stream object
* check_user_subscribed is a function that when called with no
arguments, will report whether the user is subscribed to the stream
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
if user_profile.realm.is_zephyr_mirror_realm and not stream_dict["invite_only"]:
raise JsonableError(_("You cannot get subscribers for public streams in this realm"))
if (stream_dict["invite_only"] and not check_user_subscribed()):
raise JsonableError(_("Unable to retrieve subscribers for invite-only stream"))
# sub_dict is a dictionary mapping stream_id => whether the user is subscribed to that stream
def bulk_get_subscriber_user_ids(stream_dicts, user_profile, sub_dict):
# type: (Iterable[Mapping[str, Any]], UserProfile, Mapping[int, bool]) -> Dict[int, List[int]]
target_stream_dicts = []
for stream_dict in stream_dicts:
try:
validate_user_access_to_subscribers_helper(user_profile, stream_dict,
lambda: sub_dict[stream_dict["id"]])
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
subscriptions = Subscription.objects.select_related("recipient").filter(
recipient__type=Recipient.STREAM,
recipient__type_id__in=[stream["id"] for stream in target_stream_dicts],
user_profile__is_active=True,
active=True).values("user_profile_id", "recipient__type_id")
result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
for sub in subscriptions:
result[sub["recipient__type_id"]].append(sub["user_profile_id"])
return result
def get_subscribers_query(stream, requesting_user):
# type: (Stream, UserProfile) -> QuerySet
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = Subscription.objects.filter(recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
user_profile__is_active=True,
active=True)
return subscriptions
def get_subscribers(stream, requesting_user=None):
# type: (Stream, Optional[UserProfile]) -> List[UserProfile]
subscriptions = get_subscribers_query(stream, requesting_user).select_related()
return [subscription.user_profile for subscription in subscriptions]
def get_subscriber_emails(stream, requesting_user=None):
# type: (Stream, Optional[UserProfile]) -> List[Text]
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def maybe_get_subscriber_emails(stream, user_profile):
# type: (Stream, UserProfile) -> List[Text]
""" Alternate version of get_subscriber_emails that takes a Stream object only
(not a name), and simply returns an empty list if unable to get a real
subscriber list (because we're on the MIT realm). """
try:
subscribers = get_subscriber_emails(stream, requesting_user=user_profile)
except JsonableError:
subscribers = []
return subscribers
def set_stream_color(user_profile, stream_name, color=None):
# type: (UserProfile, Text, Optional[Text]) -> Text
subscription = get_subscription(stream_name, user_profile)
if not color:
color = pick_color(user_profile)
subscription.color = color
subscription.save(update_fields=["color"])
return color
def notify_subscriptions_added(user_profile, sub_pairs, stream_emails, no_log=False):
# type: (UserProfile, Iterable[Tuple[Subscription, Stream]], Callable[[Stream], List[Text]], bool) -> None
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'domain': user_profile.realm.domain})
# Send a notification to the user who subscribed.
payload = [dict(name=stream.name,
stream_id=stream.id,
in_home_view=subscription.in_home_view,
invite_only=stream.invite_only,
color=subscription.color,
email_address=encode_email_address(stream),
desktop_notifications=subscription.desktop_notifications,
audible_notifications=subscription.audible_notifications,
description=stream.description,
pin_to_top=subscription.pin_to_top,
subscribers=stream_emails(stream))
for (subscription, stream) in sub_pairs]
event = dict(type="subscription", op="add",
subscriptions=payload)
send_event(event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream, altered_users, subscribed_users):
# type: (Stream, Iterable[UserProfile], Iterable[UserProfile]) -> Set[int]
'''
altered_users is a list of users that we are adding/removing
subscribed_users is the list of already subscribed users
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
'''
altered_user_ids = [user.id for user in altered_users]
if stream.invite_only:
# PRIVATE STREAMS
all_subscribed_ids = [user.id for user in subscribed_users]
return set(all_subscribed_ids) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_user_ids(stream.realm)) - set(altered_user_ids)
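# For illustration (hypothetical ids): for an invite-only stream whose
# subscribers have ids {1, 2, 3}, adding users 3 and 4 notifies peers {1, 2};
# for a public stream the peer set is every active user in the realm minus
# the altered users, so even never-subscribed clients can keep their
# "never subscribed" data structures current.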
def query_all_subs_by_stream(streams):
# type: (Iterable[Stream]) -> Dict[int, List[UserProfile]]
all_subs = Subscription.objects.filter(recipient__type=Recipient.STREAM,
recipient__type_id__in=[stream.id for stream in streams],
user_profile__is_active=True,
active=True).select_related('recipient', 'user_profile')
all_subs_by_stream = defaultdict(list) # type: Dict[int, List[UserProfile]]
for sub in all_subs:
all_subs_by_stream[sub.recipient.type_id].append(sub.user_profile)
return all_subs_by_stream
def bulk_add_subscriptions(streams, users):
# type: (Iterable[Stream], Iterable[UserProfile]) -> Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams]) # type: Mapping[int, Recipient]
recipients = [recipient.id for recipient in recipients_map.values()] # type: List[int]
stream_map = {} # type: Dict[int, Stream]
for stream in streams:
stream_map[recipients_map[stream.id].id] = stream
subs_by_user = defaultdict(list) # type: Dict[int, List[Subscription]]
all_subs_query = Subscription.objects.select_related("user_profile")
for sub in all_subs_query.filter(user_profile__in=users,
recipient__type=Recipient.STREAM):
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
subs_to_activate = [] # type: List[Tuple[Subscription, Stream]]
new_subs = [] # type: List[Tuple[UserProfile, int, Stream]]
for user_profile in users:
needs_new_sub = set(recipients) # type: Set[int]
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add = [] # type: List[Tuple[Subscription, Stream]]
for (user_profile, recipient_id, stream) in new_subs:
color = pick_color_helper(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id,
desktop_notifications=user_profile.enable_stream_desktop_notifications,
audible_notifications=user_profile.enable_stream_sounds)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(user_profile.realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
Subscription.objects.filter(id__in=[sub.id for (sub, stream) in subs_to_activate]).update(active=True)
occupied_streams_after = list(get_occupied_streams(user_profile.realm))
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams:
event = dict(type="stream", op="occupy",
streams=[stream.to_dict()
for stream in new_occupied_streams])
send_event(event, active_user_ids(user_profile.realm))
# Notify all existing users on streams that users have joined
# First, get all users subscribed to the streams that we care about
# We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries
all_subs_by_stream = query_all_subs_by_stream(streams=streams)
def fetch_stream_subscriber_emails(stream):
# type: (Stream) -> List[Text]
if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
return []
users = all_subs_by_stream[stream.id]
return [u.email for u in users]
sub_tuples_by_user = defaultdict(list) # type: Dict[int, List[Tuple[Subscription, Stream]]]
new_streams = set() # type: Set[Tuple[int, int]]
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_emails)
for stream in streams:
if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
continue
new_users = [user for user in users if (user.id, stream.id) in new_streams]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_users=new_users,
subscribed_users=all_subs_by_stream[stream.id]
)
if peer_user_ids:
for added_user in new_users:
event = dict(type="subscription", op="peer_add",
subscriptions=[stream.name],
user_id=added_user.id)
send_event(event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
def notify_subscriptions_removed(user_profile, streams, no_log=False):
# type: (UserProfile, Iterable[Stream], bool) -> None
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'domain': user_profile.realm.domain})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(event, [user_profile.id])
def bulk_remove_subscriptions(users, streams):
# type: (Iterable[UserProfile], Iterable[Stream]) -> Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
recipients_map = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in streams]) # type: Mapping[int, Recipient]
stream_map = {} # type: Dict[int, Stream]
for stream in streams:
stream_map[recipients_map[stream.id].id] = stream
subs_by_user = dict((user_profile.id, []) for user_profile in users) # type: Dict[int, List[Subscription]]
for sub in Subscription.objects.select_related("user_profile").filter(user_profile__in=users,
recipient__in=list(recipients_map.values()),
active=True):
subs_by_user[sub.user_profile_id].append(sub)
subs_to_deactivate = [] # type: List[Tuple[Subscription, Stream]]
not_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
for user_profile in users:
recipients_to_unsub = set([recipient.id for recipient in recipients_map.values()])
for sub in subs_by_user[user_profile.id]:
recipients_to_unsub.remove(sub.recipient_id)
subs_to_deactivate.append((sub, stream_map[sub.recipient_id]))
for recipient_id in recipients_to_unsub:
not_subscribed.append((user_profile, stream_map[recipient_id]))
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(user_profile.realm))
Subscription.objects.filter(id__in=[sub.id for (sub, stream_name) in
subs_to_deactivate]).update(active=False)
occupied_streams_after = list(get_occupied_streams(user_profile.realm))
new_vacant_streams = [stream for stream in
set(occupied_streams_before) - set(occupied_streams_after)
if not stream.invite_only]
if new_vacant_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_streams])
send_event(event, active_user_ids(user_profile.realm))
altered_user_dict = defaultdict(list) # type: Dict[int, List[UserProfile]]
streams_by_user = defaultdict(list) # type: Dict[int, List[Stream]]
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
all_subs_by_stream = query_all_subs_by_stream(streams=streams)
for stream in streams:
if stream.realm.is_zephyr_mirror_realm and not stream.invite_only:
continue
altered_users = altered_user_dict[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_users=altered_users,
subscribed_users=all_subs_by_stream[stream.id]
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
subscriptions=[stream.name],
user_id=removed_user.id)
send_event(event, peer_user_ids)
return ([(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed)
def log_subscription_property_change(user_email, stream_name, property, value):
# type: (Text, Text, Text, Any) -> None
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile, sub, stream_name,
property_name, value):
# type: (UserProfile, Subscription, Text, Text, Any) -> None
setattr(sub, property_name, value)
sub.save(update_fields=[property_name])
log_subscription_property_change(user_profile.email, stream_name,
property_name, value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=property_name,
value=value,
name=stream_name)
send_event(event, [user_profile.id])
def do_activate_user(user_profile, log=True, join_date=None):
    # type: (UserProfile, bool, Optional[datetime.datetime]) -> None
    if join_date is None:
        # Compute the join date at call time; a default of timezone.now()
        # would be evaluated once, at module import time.
        join_date = timezone.now()
    user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = join_date
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
if log:
domain = user_profile.realm.domain
log_event({'type': 'user_activated',
'user': user_profile.email,
'domain': domain})
notify_created_user(user_profile)
def do_reactivate_user(user_profile):
# type: (UserProfile) -> None
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
domain = user_profile.realm.domain
log_event({'type': 'user_reactivated',
'user': user_profile.email,
'domain': domain})
notify_created_user(user_profile)
def do_change_password(user_profile, password, log=True, commit=True,
hashed_password=False):
# type: (UserProfile, Text, bool, bool, bool) -> None
    if hashed_password:
        # This is a hashed password, not the password itself; assign it
        # directly rather than hashing it a second time.
        user_profile.password = password
    else:
        user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
if log:
log_event({'type': 'user_change_password',
'user': user_profile.email,
'pwhash': user_profile.password})
def do_change_full_name(user_profile, full_name, log=True):
# type: (UserProfile, Text, bool) -> None
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
if log:
log_event({'type': 'user_change_full_name',
'user': user_profile.email,
'full_name': full_name})
payload = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm))
if user_profile.is_bot:
send_event(dict(type='realm_bot', op='update', bot=payload),
bot_owner_userids(user_profile))
def do_change_tos_version(user_profile, tos_version, log=True):
# type: (UserProfile, Text, bool) -> None
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
if log:
log_event({'type': 'user_change_tos_version',
'user': user_profile.email,
'tos_version': tos_version})
def do_regenerate_api_key(user_profile, log=True):
# type: (UserProfile, bool) -> None
user_profile.api_key = random_api_key()
user_profile.save(update_fields=["api_key"])
if log:
log_event({'type': 'user_change_api_key',
'user': user_profile.email})
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
api_key=user_profile.api_key,
)),
bot_owner_userids(user_profile))
def do_change_avatar_source(user_profile, avatar_source, log=True):
# type: (UserProfile, Text, bool) -> None
user_profile.avatar_source = avatar_source
user_profile.save(update_fields=["avatar_source"])
if log:
log_event({'type': 'user_change_avatar_source',
'user': user_profile.email,
'avatar_source': avatar_source})
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_userids(user_profile))
else:
payload = dict(
email=user_profile.email,
avatar_url=avatar_url(user_profile),
user_id=user_profile.id
)
send_event(dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm))
def _default_stream_permision_check(user_profile, stream):
# type: (UserProfile, Optional[Stream]) -> None
# Any user can have a None default stream
if stream is not None:
if user_profile.is_bot:
user = user_profile.bot_owner
else:
user = user_profile
if stream.invite_only and not subscribed_to_stream(user, stream):
raise JsonableError(_('Insufficient permission'))
def do_change_default_sending_stream(user_profile, stream, log=True):
# type: (UserProfile, Stream, bool) -> None
_default_stream_permision_check(user_profile, stream)
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name
else:
stream_name = None
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_userids(user_profile))
def do_change_default_events_register_stream(user_profile, stream, log=True):
# type: (UserProfile, Stream, bool) -> None
_default_stream_permision_check(user_profile, stream)
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name
else:
stream_name = None
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_userids(user_profile))
def do_change_default_all_public_streams(user_profile, value, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_userids(user_profile))
def do_change_is_admin(user_profile, value, permission='administer'):
# type: (UserProfile, bool, str) -> None
if permission == "administer":
user_profile.is_realm_admin = value
user_profile.save(update_fields=["is_realm_admin"])
elif permission == "api_super_user":
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
else:
raise Exception("Unknown permission")
if permission == 'administer':
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
is_admin=value))
send_event(event, active_user_ids(user_profile.realm))
def do_change_bot_type(user_profile, value):
# type: (UserProfile, int) -> None
user_profile.bot_type = value
user_profile.save(update_fields=["bot_type"])
def do_make_stream_public(user_profile, realm, stream_name):
# type: (UserProfile, Realm, Text) -> None
stream_name = stream_name.strip()
stream = get_stream(stream_name, realm)
if not stream:
raise JsonableError(_('Unknown stream "%s"') % (stream_name,))
if not subscribed_to_stream(user_profile, stream):
raise JsonableError(_('You are not invited to this stream.'))
stream.invite_only = False
stream.save(update_fields=['invite_only'])
def do_make_stream_private(realm, stream_name):
# type: (Realm, Text) -> None
stream_name = stream_name.strip()
stream = get_stream(stream_name, realm)
if not stream:
raise JsonableError(_('Unknown stream "%s"') % (stream_name,))
stream.invite_only = True
stream.save(update_fields=['invite_only'])
def do_rename_stream(realm, old_name, new_name, log=True):
# type: (Realm, Text, Text, bool) -> Dict[str, Text]
old_name = old_name.strip()
new_name = new_name.strip()
stream = get_stream(old_name, realm)
if not stream:
raise JsonableError(_('Unknown stream "%s"') % (old_name,))
# Will raise if there's an issue.
check_stream_name(new_name)
if get_stream(new_name, realm) and old_name.lower() != new_name.lower():
raise JsonableError(_('Stream name "%s" is already taken') % (new_name,))
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'domain': realm.domain,
'new_name': new_name})
recipient = get_recipient(Recipient.STREAM, stream.id)
messages = Message.objects.filter(recipient=recipient).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, realm)
new_cache_key = get_stream_cache_key(stream.name, realm)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient.id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id, True) for message in messages)
cache_delete_many(
to_dict_cache_key_id(message.id, False) for message in messages)
new_email = encode_email_address(stream)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
name=old_name
)
send_event(event, can_access_stream_user_ids(stream))
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(realm, stream_name, new_description):
# type: (Realm, Text, Text) -> None
stream = get_stream(stream_name, realm)
stream.description = new_description
stream.save(update_fields=['description'])
event = dict(type='stream', op='update',
property='description', name=stream_name,
value=new_description)
send_event(event, can_access_stream_user_ids(stream))
def do_create_realm(string_id, name, restricted_to_domain=None,
invite_required=None, org_type=None):
# type: (Text, Text, Optional[bool], Optional[bool], Optional[int]) -> Tuple[Realm, bool]
realm = get_realm(string_id)
created = not realm
if created:
kwargs = {} # type: Dict[str, Any]
if restricted_to_domain is not None:
kwargs['restricted_to_domain'] = restricted_to_domain
if invite_required is not None:
kwargs['invite_required'] = invite_required
if org_type is not None:
kwargs['org_type'] = org_type
realm = Realm(string_id=string_id, name=name,
domain=string_id + '@acme.com', **kwargs)
realm.save()
# Create stream once Realm object has been saved
notifications_stream, _ = create_stream_if_needed(realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME)
realm.notifications_stream = notifications_stream
realm.save(update_fields=['notifications_stream'])
# Include a welcome message in this notifications stream
product_name = "Zulip"
content = """Hello, and welcome to %s!
This is a message on stream `%s` with the topic `welcome`. We'll use this stream for
system-generated notifications.""" % (product_name, notifications_stream.name,)
msg = internal_prep_message(settings.WELCOME_BOT, 'stream',
notifications_stream.name, "welcome",
content, realm=realm)
do_send_messages([msg])
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"restricted_to_domain": restricted_to_domain,
"invite_required": invite_required,
"org_type": org_type})
if settings.NEW_USER_BOT is not None:
signup_message = "Signups enabled"
internal_send_message(settings.NEW_USER_BOT, "stream",
"signups", string_id, signup_message)
return (realm, created)
def do_change_enable_stream_desktop_notifications(user_profile,
enable_stream_desktop_notifications,
log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_stream_desktop_notifications = enable_stream_desktop_notifications
user_profile.save(update_fields=["enable_stream_desktop_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_stream_desktop_notifications',
'setting': enable_stream_desktop_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_stream_sounds(user_profile, enable_stream_sounds, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_stream_sounds = enable_stream_sounds
user_profile.save(update_fields=["enable_stream_sounds"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_stream_sounds',
'setting': enable_stream_sounds}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_desktop_notifications = enable_desktop_notifications
user_profile.save(update_fields=["enable_desktop_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_desktop_notifications',
'setting': enable_desktop_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_pm_content_in_desktop_notifications(user_profile,
pm_content_in_desktop_notifications, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.pm_content_in_desktop_notifications \
= pm_content_in_desktop_notifications
user_profile.save(update_fields=["pm_content_in_desktop_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'pm_content_in_desktop_notifications',
'setting': pm_content_in_desktop_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_sounds(user_profile, enable_sounds, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_sounds = enable_sounds
user_profile.save(update_fields=["enable_sounds"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_sounds',
'setting': enable_sounds}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_offline_email_notifications(user_profile, offline_email_notifications, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_offline_email_notifications = offline_email_notifications
user_profile.save(update_fields=["enable_offline_email_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_offline_email_notifications',
'setting': offline_email_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_offline_push_notifications(user_profile, offline_push_notifications, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_offline_push_notifications = offline_push_notifications
user_profile.save(update_fields=["enable_offline_push_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_offline_push_notifications',
'setting': offline_push_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_online_push_notifications(user_profile, enable_online_push_notifications, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_online_push_notifications = enable_online_push_notifications
user_profile.save(update_fields=["enable_online_push_notifications"])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_online_push_notifications',
'setting': enable_online_push_notifications}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_enable_digest_emails(user_profile, enable_digest_emails, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.enable_digest_emails = enable_digest_emails
user_profile.save(update_fields=["enable_digest_emails"])
if not enable_digest_emails:
# Remove any digest emails that have been enqueued.
clear_followup_emails_queue(user_profile.email)
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': 'enable_digest_emails',
'setting': enable_digest_emails}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_autoscroll_forever(user_profile, autoscroll_forever, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.autoscroll_forever = autoscroll_forever
user_profile.save(update_fields=["autoscroll_forever"])
if log:
log_event({'type': 'autoscroll_forever',
'user': user_profile.email,
'autoscroll_forever': autoscroll_forever})
def do_change_enter_sends(user_profile, enter_sends):
# type: (UserProfile, bool) -> None
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_change_default_desktop_notifications(user_profile, default_desktop_notifications):
# type: (UserProfile, bool) -> None
user_profile.default_desktop_notifications = default_desktop_notifications
user_profile.save(update_fields=["default_desktop_notifications"])
def do_change_twenty_four_hour_time(user_profile, setting_value, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.twenty_four_hour_time = setting_value
user_profile.save(update_fields=["twenty_four_hour_time"])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': 'twenty_four_hour_time',
'setting': setting_value}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_left_side_userlist(user_profile, setting_value, log=True):
# type: (UserProfile, bool, bool) -> None
user_profile.left_side_userlist = setting_value
user_profile.save(update_fields=["left_side_userlist"])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': 'left_side_userlist',
'setting': setting_value}
if log:
log_event(event)
send_event(event, [user_profile.id])
def do_change_default_language(user_profile, setting_value, log=True):
# type: (UserProfile, Text, bool) -> None
if setting_value == 'zh_CN':
# NB: remove this once we upgrade to Django 1.9
# zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
# Django 1.9
setting_value = 'zh_HANS'
user_profile.default_language = setting_value
user_profile.save(update_fields=["default_language"])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': 'default_language',
'setting': setting_value}
if log:
log_event(event)
send_event(event, [user_profile.id])
def set_default_streams(realm, stream_dict):
# type: (Realm, Dict[Text, Dict[Text, Any]]) -> None
DefaultStream.objects.filter(realm=realm).delete()
stream_names = []
for name, options in stream_dict.items():
stream_names.append(name)
stream, _ = create_stream_if_needed(realm,
name,
invite_only = options["invite_only"],
stream_description = options["description"])
DefaultStream.objects.create(stream=stream, realm=realm)
    # Always include the realm's default notifications stream, if it exists
if realm.notifications_stream is not None:
DefaultStream.objects.get_or_create(stream=realm.notifications_stream, realm=realm)
log_event({'type': 'default_streams',
'domain': realm.domain,
'streams': stream_names})
def notify_default_streams(realm):
# type: (Realm) -> None
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm))
)
send_event(event, active_user_ids(realm))
def do_add_default_stream(realm, stream_name):
# type: (Realm, Text) -> None
stream, _ = create_stream_if_needed(realm, stream_name)
if not DefaultStream.objects.filter(realm=realm, stream=stream).exists():
DefaultStream.objects.create(realm=realm, stream=stream)
notify_default_streams(realm)
def do_remove_default_stream(realm, stream_name):
# type: (Realm, Text) -> None
stream = get_stream(stream_name, realm)
if stream is None:
raise JsonableError(_("Stream does not exist"))
DefaultStream.objects.filter(realm=realm, stream=stream).delete()
notify_default_streams(realm)
def get_default_streams_for_realm(realm):
# type: (Realm) -> List[Stream]
return [default.stream for default in
DefaultStream.objects.select_related("stream", "stream__realm").filter(realm=realm)]
def get_default_subs(user_profile):
# type: (UserProfile) -> List[Stream]
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm)
# Returns default streams in JSON-serializable format
def streams_to_dicts_sorted(streams):
# type: (List[Stream]) -> List[Dict[str, Any]]
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile, log_time):
# type: (UserProfile, datetime.datetime) -> None
effective_end = log_time + datetime.timedelta(minutes=15)
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
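# For illustration: if the most recent interval is [12:00, 12:15] and a new
# activity event arrives at 12:10, its effective end is 12:25; the start
# (12:10) falls inside the old interval, so the existing row is stretched to
# [12:00, 12:25] instead of creating an overlapping one.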
@statsd_increment('user_activity')
def do_update_user_activity(user_profile, client, query, log_time):
# type: (UserProfile, Client, Text, datetime.datetime) -> None
(activity, created) = UserActivity.objects.get_or_create(
user_profile = user_profile,
client = client,
query = query,
defaults={'last_visit': log_time, 'count': 0})
activity.count += 1
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile, presence):
# type: (UserProfile, UserPresence) -> None
presence_dict = presence.to_dict()
event = dict(type="presence", email=user_profile.email,
server_timestamp=time.time(),
presence={presence_dict['client']: presence.to_dict()})
send_event(event, active_user_ids(user_profile.realm))
def consolidate_client(client):
# type: (Client) -> Client
    # The web app reports its client as 'website', while the desktop app
    # reports ZulipDesktop because it sets a custom user agent. We want
    # both to count as web users, so we alias ZulipDesktop to website.
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile, client, log_time, status):
# type: (UserProfile, Client, datetime.datetime, int) -> None
client = consolidate_client(client)
(presence, created) = UserPresence.objects.get_or_create(
user_profile = user_profile,
client = client,
defaults = {'timestamp': log_time,
'status': status})
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
    if not created and (stale_status or was_idle or status == presence.status):
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.is_zephyr_mirror_realm and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
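# For illustration: an ACTIVE-to-IDLE report arriving while the existing
# presence row is still fresh (younger than the 1m10s staleness threshold)
# fails all three conditions above -- not stale, was not idle, and the status
# differs -- so nothing is written; the transition is only recorded once the
# row has gone stale.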
def update_user_activity_interval(user_profile, log_time):
# type: (UserProfile, datetime.datetime) -> None
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event,
lambda e: do_update_user_activity_interval(user_profile, log_time))
def update_user_presence(user_profile, client, log_time, status,
new_user_input):
# type: (UserProfile, Client, datetime.datetime, int, bool) -> None
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event,
lambda e: do_update_user_presence(user_profile, client,
log_time, status))
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile, pointer, update_flags=False):
# type: (UserProfile, int, bool) -> None
prev_pointer = user_profile.pointer
user_profile.pointer = pointer
user_profile.save(update_fields=["pointer"])
if update_flags:
# Until we handle the new read counts in the Android app
# natively, this is a shim that will mark as read any messages
# up until the pointer move
UserMessage.objects.filter(user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer,
flags=~UserMessage.flags.read) \
.update(flags=F('flags').bitor(UserMessage.flags.read))
event = dict(type='pointer', pointer=pointer)
send_event(event, [user_profile.id])
def do_update_message_flags(user_profile, operation, flag, messages, all, stream_obj, topic_name):
# type: (UserProfile, Text, Text, Sequence[int], bool, Optional[Stream], Optional[Text]) -> int
flagattr = getattr(UserMessage.flags, flag)
if all:
log_statsd_event('bankruptcy')
msgs = UserMessage.objects.filter(user_profile=user_profile)
elif stream_obj is not None:
recipient = get_recipient(Recipient.STREAM, stream_obj.id)
if topic_name:
msgs = UserMessage.objects.filter(message__recipient=recipient,
user_profile=user_profile,
message__subject__iexact=topic_name)
else:
msgs = UserMessage.objects.filter(message__recipient=recipient, user_profile=user_profile)
else:
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# Hack to let you star any message
if msgs.count() == 0:
if not len(messages) == 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
# The filter() statements below prevent postgres from doing a lot of
# unnecessary work, which is a big deal for users updating lots of
# flags (e.g. bankruptcy). This patch arose from seeing slow calls
# to POST /json/messages/flags in the logs. The filter() statements
# are kind of magical; they are actually just testing the one bit.
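    # For example (illustrative), when adding the "starred" flag, the
    # filter(flags=~flagattr) below keeps only rows whose starred bit is
    # currently 0, so the update() only touches rows that will actually change.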
if operation == 'add':
msgs = msgs.filter(flags=~flagattr)
if stream_obj:
messages = list(msgs.values_list('message__id', flat=True))
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
msgs = msgs.filter(flags=flagattr)
if stream_obj:
messages = list(msgs.values_list('message__id', flat=True))
count = msgs.update(flags=F('flags').bitand(~flagattr))
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': all}
log_event(event)
send_event(event, [user_profile.id])
statsd.incr("flags.%s.%s" % (flag, operation), count)
return count
def subscribed_to_stream(user_profile, stream):
# type: (UserProfile, Stream) -> bool
try:
if Subscription.objects.get(user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id):
return True
return False
except Subscription.DoesNotExist:
return False
def truncate_content(content, max_length, truncation_message):
# type: (Text, int, Text) -> Text
if len(content) > max_length:
content = content[:max_length - len(truncation_message)] + truncation_message
return content
def truncate_body(body):
# type: (Text) -> Text
return truncate_content(body, MAX_MESSAGE_LENGTH, "...")
def truncate_topic(topic):
# type: (Text) -> Text
return truncate_content(topic, MAX_SUBJECT_LENGTH, "...")
def update_user_message_flags(message, ums):
# type: (Message, Iterable[UserMessage]) -> None
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums = set() # type: Set[UserMessage]
def update_flag(um, should_set, flag):
# type: (UserMessage, bool, int) -> None
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
is_me_message = getattr(message, 'is_me_message', False)
update_flag(um, is_me_message, UserMessage.flags.is_me_message)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages):
# type: (List[Message]) -> List[int]
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
for changed_message in changed_messages:
message_ids.append(changed_message.id)
items_for_remote_cache[to_dict_cache_key(changed_message, True)] = \
(MessageDict.to_dict_uncached(changed_message, apply_markdown=True),)
items_for_remote_cache[to_dict_cache_key(changed_message, False)] = \
(MessageDict.to_dict_uncached(changed_message, apply_markdown=False),)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile, message, content, rendered_content):
# type: (UserProfile, Message, Optional[Text], Optional[Text]) -> None
event = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
log_event(event)
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um):
# type: (UserMessage) -> Dict[str, Any]
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile, message, subject, propagate_mode, content, rendered_content):
# type: (UserProfile, Message, Optional[Text], str, Optional[Text], Optional[Text]) -> None
event = {'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
edit_history_event = {} # type: Dict[str, Any]
changed_messages = [message]
# Set first_rendered_content to be the oldest version of the
# rendered content recorded; which is the current version if the
# content hasn't been edited before. Note that because one could
# have edited just the subject, not every edit history event
# contains a prev_rendered_content element.
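    # For illustration, a single edit_history entry built below might look
    # like this (field values are hypothetical):
    #   {"timestamp": 1478553009, "prev_subject": "old topic",
    #    "prev_content": "old text", "prev_rendered_content": "<p>old text</p>",
    #    "prev_rendered_content_version": 1}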
first_rendered_content = message.rendered_content
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
for old_edit_history_event in edit_history:
if 'prev_rendered_content' in old_edit_history_event:
first_rendered_content = old_edit_history_event['prev_rendered_content']
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
# We are turning off diff highlighting everywhere until ticket #1532 is addressed.
if False:
# Don't highlight message edit diffs on prod
rendered_content = highlight_html_differences(first_rendered_content, rendered_content)
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
prev_content = edit_history_event['prev_content']
if Message.content_has_attachment(prev_content) or Message.content_has_attachment(message.content):
check_attachment_reference_change(prev_content, message)
if subject is not None:
orig_subject = message.topic_name()
subject = truncate_topic(subject)
event["orig_subject"] = orig_subject
event["propagate_mode"] = propagate_mode
message.subject = subject
event["stream_id"] = message.recipient.type_id
event["subject"] = subject
event['subject_links'] = bugdown.subject_links(message.sender.realm_id, subject)
edit_history_event["prev_subject"] = orig_subject
if propagate_mode in ["change_later", "change_all"]:
propagate_query = Q(recipient = message.recipient, subject = orig_subject)
            # We only change messages up to 2 days in the past, to avoid hammering our
            # DB by changing an unbounded number of messages
if propagate_mode == 'change_all':
before_bound = now() - datetime.timedelta(days=2)
propagate_query = (propagate_query & ~Q(id = message.id) &
Q(pub_date__range=(before_bound, now())))
if propagate_mode == 'change_later':
propagate_query = propagate_query & Q(id__gt = message.id)
messages = Message.objects.filter(propagate_query).select_related()
# Evaluate the query before running the update
messages_list = list(messages)
messages.update(subject=subject)
for m in messages_list:
# The cached ORM object is not changed by messages.update()
# and the remote cache update requires the new value
m.subject = subject
changed_messages += messages_list
message.last_edit_time = timezone.now()
event['edit_timestamp'] = datetime_to_timestamp(message.last_edit_time)
edit_history_event['timestamp'] = event['edit_timestamp']
if message.edit_history is not None:
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
log_event(event)
message.save(update_fields=["subject", "content", "rendered_content",
"rendered_content_version", "last_edit_time",
"edit_history"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um):
# type: (UserMessage) -> Dict[str, Any]
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(event, list(map(user_info, ums)))
def encode_email_address(stream):
# type: (Stream) -> Text
return encode_email_address_helper(stream.name, stream.email_token)
def encode_email_address_helper(name, email_token):
# type: (Text, Text) -> Text
# Some deployments may not use the email gateway
if settings.EMAIL_GATEWAY_PATTERN == '':
return ''
# Given the fact that we have almost no restrictions on stream names and
# that what characters are allowed in e-mail addresses is complicated and
# dependent on context in the address, we opt for a very simple scheme:
#
# Only encode the stream name (leave the + and token alone). Encode
# everything that isn't alphanumeric plus _ as the percent-prefixed integer
# ordinal of that character, padded with zeroes to the maximum number of
# bytes of a UTF-8 encoded Unicode character.
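    # Illustrative example: a stream named "social & fun" with email token
    # "abcd1234" would yield an encoded_name of "social%0032%0038%0032fun"
    # (space is ord 32, "&" is ord 38, both zero-padded to four digits) and an
    # encoded_token of "social%0032%0038%0032fun+abcd1234".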
    encoded_name = re.sub(r"\W", lambda x: "%" + str(ord(x.group(0))).zfill(4), name)
encoded_token = "%s+%s" % (encoded_name, email_token)
return settings.EMAIL_GATEWAY_PATTERN % (encoded_token,)
def get_email_gateway_message_string_from_address(address):
# type: (Text) -> Optional[Text]
pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
if settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
# Accept mails delivered to any Zulip server
pattern_parts[-1] = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK
match_email_re = re.compile("(.*?)".join(pattern_parts))
match = match_email_re.match(address)
if not match:
return None
msg_string = match.group(1)
return msg_string
def decode_email_address(email):
# type: (Text) -> Tuple[Text, Text]
# Perform the reverse of encode_email_address. Returns a tuple of (streamname, email_token)
msg_string = get_email_gateway_message_string_from_address(email)
if '.' in msg_string:
# Workaround for Google Groups and other programs that don't accept emails
# that have + signs in them (see Trac #2102)
encoded_stream_name, token = msg_string.split('.')
else:
encoded_stream_name, token = msg_string.split('+')
    stream_name = re.sub(r"%\d{4}", lambda x: unichr(int(x.group(0)[1:])), encoded_stream_name)
return stream_name, token
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile):
# type: (UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
sub_dicts = Subscription.objects.select_related("recipient").filter(
user_profile = user_profile,
recipient__type = Recipient.STREAM).values(
"recipient__type_id", "in_home_view", "color", "desktop_notifications",
"audible_notifications", "active", "pin_to_top")
stream_ids = set([sub["recipient__type_id"] for sub in sub_dicts])
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values("id", "name", "invite_only", "realm_id",
"realm__domain", "email_token", "description")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["recipient__type_id"]] for sub in sub_dicts
if sub["recipient__type_id"] in stream_hash]
streams_subscribed_map = dict((sub["recipient__type_id"], sub["active"]) for sub in sub_dicts)
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
subscriber_map = bulk_get_subscriber_user_ids(all_streams, user_profile, streams_subscribed_map)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["recipient__type_id"])
stream = stream_hash.get(sub["recipient__type_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
subscribers = subscriber_map[stream["id"]]
# Important: don't show the subscribers if the stream is invite only
# and this user isn't on it anymore.
if stream["invite_only"] and not sub["active"]:
subscribers = None
stream_dict = {'name': stream["name"],
'in_home_view': sub["in_home_view"],
'invite_only': stream["invite_only"],
'color': sub["color"],
'desktop_notifications': sub["desktop_notifications"],
'audible_notifications': sub["audible_notifications"],
'pin_to_top': sub["pin_to_top"],
'stream_id': stream["id"],
'description': stream["description"],
'email_address': encode_email_address_helper(stream["name"], stream["email_token"])}
if subscribers is not None:
stream_dict['subscribers'] = subscribers
if sub["active"]:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
    # Listing public streams is disabled for Zephyr mirroring realms.
if user_profile.realm.is_zephyr_mirror_realm:
never_subscribed_stream_ids = set() # type: Set[int]
else:
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
if not stream['invite_only']:
stream_dict = {'name': stream['name'],
'invite_only': stream['invite_only'],
'stream_id': stream['id'],
'description': stream['description']}
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(user_profile):
# type: (UserProfile) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]
subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
user_ids = set()
for subs in [subscribed, unsubscribed, never_subscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = [email_dict[user_id] for user_id in sub['subscribers']]
return (subscribed, unsubscribed)
def get_status_dict(requesting_user_profile):
# type: (UserProfile) -> Dict[Text, Dict[Text, Dict[str, Any]]]
if requesting_user_profile.realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)
return UserPresence.get_status_dict_by_realm(requesting_user_profile.realm_id)
def get_realm_user_dicts(user_profile):
# type: (UserProfile) -> List[Dict[str, Text]]
return [{'email': userdict['email'],
'user_id': userdict['id'],
'is_admin': userdict['is_realm_admin'],
'is_bot': userdict['is_bot'],
'full_name': userdict['full_name']}
for userdict in get_active_user_dicts_in_realm(user_profile.realm)]
def get_cross_realm_dicts():
# type: () -> List[Dict[str, Any]]
users = [get_user_profile_by_email(email) for email in get_cross_realm_emails()]
return [{'email': user.email,
'user_id': user.id,
'is_admin': user.is_realm_admin,
'is_bot': user.is_bot,
'full_name': user.full_name}
for user in users]
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile, event_types, queue_id):
# type: (UserProfile, Optional[Iterable[str]], str) -> Dict[str, Any]
state = {'queue_id': queue_id} # type: Dict[str, Any]
if event_types is None:
want = lambda msg_type: True
else:
want = set(event_types).__contains__
if want('alert_words'):
state['alert_words'] = user_alert_words(user_profile)
if want('message'):
# The client should use get_old_messages() to fetch messages
# starting with the max_message_id. They will get messages
# newer than that ID via get_events()
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
state['max_message_id'] = messages[0].id
else:
state['max_message_id'] = -1
if want('muted_topics'):
state['muted_topics'] = ujson.loads(user_profile.muted_topics)
if want('pointer'):
state['pointer'] = user_profile.pointer
if want('presence'):
state['presences'] = get_status_dict(user_profile)
if want('realm'):
state['realm_name'] = user_profile.realm.name
state['realm_restricted_to_domain'] = user_profile.realm.restricted_to_domain
state['realm_invite_required'] = user_profile.realm.invite_required
state['realm_invite_by_admins_only'] = user_profile.realm.invite_by_admins_only
state['realm_authentication_methods'] = user_profile.realm.authentication_methods_dict()
state['realm_create_stream_by_admins_only'] = user_profile.realm.create_stream_by_admins_only
state['realm_add_emoji_by_admins_only'] = user_profile.realm.add_emoji_by_admins_only
state['realm_allow_message_editing'] = user_profile.realm.allow_message_editing
state['realm_message_content_edit_limit_seconds'] = user_profile.realm.message_content_edit_limit_seconds
state['realm_default_language'] = user_profile.realm.default_language
state['realm_waiting_period_threshold'] = user_profile.realm.waiting_period_threshold
if want('realm_domain'):
state['realm_domain'] = user_profile.realm.domain
if want('realm_emoji'):
state['realm_emoji'] = user_profile.realm.get_emoji()
if want('realm_filters'):
state['realm_filters'] = realm_filters_for_realm(user_profile.realm_id)
if want('realm_user'):
state['realm_users'] = get_realm_user_dicts(user_profile)
if want('realm_bot'):
state['realm_bots'] = get_owned_bot_dicts(user_profile)
if want('referral'):
state['referrals'] = {'granted': user_profile.invites_granted,
'used': user_profile.invites_used}
if want('subscription'):
subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(user_profile)
state['subscriptions'] = subscriptions
state['unsubscribed'] = unsubscribed
state['never_subscribed'] = never_subscribed
if want('update_message_flags'):
        # There's no initial data for message flag updates; the client will
        # get any updates during a session from get_events()
pass
if want('stream'):
state['streams'] = do_get_streams(user_profile)
if want('default_streams'):
state['realm_default_streams'] = streams_to_dicts_sorted(get_default_streams_for_realm(user_profile.realm))
if want('update_display_settings'):
state['twenty_four_hour_time'] = user_profile.twenty_four_hour_time
state['left_side_userlist'] = user_profile.left_side_userlist
default_language = user_profile.default_language
if user_profile.default_language == 'zh_HANS':
# NB: remove this once we upgrade to Django 1.9
# zh-cn and zh-tw will be replaced by zh-hans and zh-hant in
# Django 1.9
default_language = 'zh_CN'
state['default_language'] = default_language
if want('update_global_notifications'):
state['enable_stream_desktop_notifications'] = user_profile.enable_stream_desktop_notifications
state['enable_stream_sounds'] = user_profile.enable_stream_sounds
state['enable_desktop_notifications'] = user_profile.enable_desktop_notifications
state['enable_sounds'] = user_profile.enable_sounds
state['enable_offline_email_notifications'] = user_profile.enable_offline_email_notifications
state['enable_offline_push_notifications'] = user_profile.enable_offline_push_notifications
state['enable_online_push_notifications'] = user_profile.enable_online_push_notifications
state['enable_digest_emails'] = user_profile.enable_digest_emails
return state
def apply_events(state, events, user_profile):
# type: (Dict[str, Any], Iterable[Dict[str, Any]], UserProfile) -> None
for event in events:
if event['type'] == "message":
state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
elif event['type'] == "pointer":
state['pointer'] = max(state['pointer'], event['pointer'])
elif event['type'] == "realm_user":
person = event['person']
def our_person(p):
# type: (Dict[str, Any]) -> bool
return p['email'] == person['email']
if event['op'] == "add":
state['realm_users'].append(person)
elif event['op'] == "remove":
state['realm_users'] = [user for user in state['realm_users'] if not our_person(user)]
elif event['op'] == 'update':
for p in state['realm_users']:
if our_person(p):
# In the unlikely event that the current user
# just changed to/from being an admin, we need
# to add/remove the data on all bots in the
# realm. This is ugly and probably better
# solved by removing the all-realm-bots data
# given to admin users from this flow.
if ('is_admin' in person and 'realm_bots' in state and
user_profile.email == person['email']):
if p['is_admin'] and not person['is_admin']:
state['realm_bots'] = []
if not p['is_admin'] and person['is_admin']:
state['realm_bots'] = get_owned_bot_dicts(user_profile)
# Now update the person
p.update(person)
elif event['type'] == 'realm_bot':
if event['op'] == 'add':
state['realm_bots'].append(event['bot'])
if event['op'] == 'remove':
email = event['bot']['email']
state['realm_bots'] = [b for b in state['realm_bots'] if b['email'] != email]
if event['op'] == 'update':
for bot in state['realm_bots']:
if bot['email'] == event['bot']['email']:
bot.update(event['bot'])
elif event['type'] == 'stream':
if event['op'] == 'create':
for stream in event['streams']:
if not stream['invite_only']:
stream_data = copy.deepcopy(stream)
stream_data['subscribers'] = []
# Add stream to never_subscribed (if not invite_only)
state['never_subscribed'].append(stream_data)
if event['op'] == 'delete':
deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
stream['stream_id'] not in deleted_stream_ids]
if event['op'] == 'update':
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
for obj in state['subscriptions']:
if obj['name'].lower() == event['name'].lower():
obj[event['property']] = event['value']
# Also update the pure streams data
for stream in state['streams']:
if stream['name'].lower() == event['name'].lower():
prop = event['property']
if prop in stream:
stream[prop] = event['value']
elif event['op'] == "occupy":
state['streams'] += event['streams']
elif event['op'] == "vacate":
stream_ids = [s["stream_id"] for s in event['streams']]
state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
elif event['type'] == 'default_streams':
state['realm_default_streams'] = event['default_streams']
elif event['type'] == 'realm':
if event['op'] == "update":
field = 'realm_' + event['property']
state[field] = event['value']
elif event['op'] == "update_dict":
for key, value in event['data'].items():
state['realm_' + key] = value
elif event['type'] == "subscription":
if event['op'] in ["add"]:
                # Convert the emails to user_profile IDs, since that's the format register() returns
# TODO: Clean up this situation
for item in event["subscriptions"]:
item["subscribers"] = [get_user_profile_by_email(email).id for email in item["subscribers"]]
def name(sub):
# type: (Dict[str, Any]) -> Text
return sub['name'].lower()
if event['op'] == "add":
added_names = set(map(name, event["subscriptions"]))
was_added = lambda s: name(s) in added_names
# add the new subscriptions
state['subscriptions'] += event['subscriptions']
# remove them from unsubscribed if they had been there
state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
# remove them from never_subscribed if they had been there
state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
elif event['op'] == "remove":
removed_names = set(map(name, event["subscriptions"]))
was_removed = lambda s: name(s) in removed_names
# Find the subs we are affecting.
removed_subs = list(filter(was_removed, state['subscriptions']))
# Remove our user from the subscribers of the removed subscriptions.
for sub in removed_subs:
sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
                # We must effectively copy the removed subscriptions from subscriptions to
                # unsubscribed, since we only have the name in our data structure.
state['unsubscribed'] += removed_subs
# Now filter out the removed subscriptions from subscriptions.
state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
elif event['op'] == 'update':
for sub in state['subscriptions']:
if sub['name'].lower() == event['name'].lower():
sub[event['property']] = event['value']
elif event['op'] == 'peer_add':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
for sub in state['never_subscribed']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
elif event['op'] == 'peer_remove':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id in sub['subscribers']):
sub['subscribers'].remove(user_id)
elif event['type'] == "presence":
state['presences'][event['email']] = event['presence']
elif event['type'] == "update_message":
# The client will get the updated message directly
pass
elif event['type'] == "referral":
state['referrals'] = event['referrals']
elif event['type'] == "update_message_flags":
# The client will get the message with the updated flags directly
pass
elif event['type'] == "realm_emoji":
state['realm_emoji'] = event['realm_emoji']
elif event['type'] == "alert_words":
state['alert_words'] = event['alert_words']
elif event['type'] == "muted_topics":
state['muted_topics'] = event["muted_topics"]
elif event['type'] == "realm_filters":
state['realm_filters'] = event["realm_filters"]
elif event['type'] == "update_display_settings":
if event['setting_name'] == "twenty_four_hour_time":
state['twenty_four_hour_time'] = event["setting"]
if event['setting_name'] == 'left_side_userlist':
state['left_side_userlist'] = event["setting"]
elif event['type'] == "update_global_notifications":
if event['notification_name'] == "enable_stream_desktop_notifications":
state['enable_stream_desktop_notifications'] = event['setting']
elif event['notification_name'] == "enable_stream_sounds":
state['enable_stream_sounds'] = event['setting']
elif event['notification_name'] == "enable_desktop_notifications":
state['enable_desktop_notifications'] = event['setting']
elif event['notification_name'] == "enable_sounds":
state['enable_sounds'] = event['setting']
elif event['notification_name'] == "enable_offline_email_notifications":
state['enable_offline_email_notifications'] = event['setting']
elif event['notification_name'] == "enable_offline_push_notifications":
state['enable_offline_push_notifications'] = event['setting']
elif event['notification_name'] == "enable_online_push_notifications":
state['enable_online_push_notifications'] = event['setting']
elif event['notification_name'] == "enable_digest_emails":
state['enable_digest_emails'] = event['setting']
else:
raise ValueError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile, user_client, apply_markdown=True,
event_types=None, queue_lifespan_secs=0, all_public_streams=False,
narrow=[]):
# type: (UserProfile, Client, bool, Optional[Iterable[str]], int, bool, Iterable[Sequence[Text]]) -> Dict[str, Any]
# Technically we don't need to check this here because
# build_narrow_filter will check it, but it's nicer from an error
# handling perspective to do it before contacting Tornado
check_supported_events_narrow_filter(narrow)
queue_id = request_event_queue(user_profile, user_client, apply_markdown,
queue_lifespan_secs, event_types, all_public_streams,
narrow=narrow)
if queue_id is None:
raise JsonableError(_("Could not allocate event queue"))
if event_types is not None:
event_types_set = set(event_types) # type: Optional[Set[str]]
else:
event_types_set = None
ret = fetch_initial_state_data(user_profile, event_types_set, queue_id)
# Apply events that came in while we were fetching initial data
events = get_user_events(user_profile, queue_id, -1)
apply_events(ret, events, user_profile)
if events:
ret['last_event_id'] = events[-1]['id']
else:
ret['last_event_id'] = -1
return ret
def do_send_confirmation_email(invitee, referrer):
# type: (PreregistrationUser, UserProfile) -> None
"""
Send the confirmation/welcome e-mail to an invited user.
`invitee` is a PreregistrationUser.
`referrer` is a UserProfile.
"""
subject_template_path = 'confirmation/invite_email_subject.txt'
body_template_path = 'confirmation/invite_email_body.txt'
context = {'referrer': referrer,
'support_email': settings.ZULIP_ADMINISTRATOR,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS}
if referrer.realm.is_zephyr_mirror_realm:
subject_template_path = 'confirmation/mituser_invite_email_subject.txt'
body_template_path = 'confirmation/mituser_invite_email_body.txt'
Confirmation.objects.send_confirmation(
invitee, invitee.email, additional_context=context,
subject_template_path=subject_template_path,
body_template_path=body_template_path, host=referrer.realm.host)
@statsd_increment("push_notifications")
def handle_push_notification(user_profile_id, missed_message):
# type: (int, Dict[str, Any]) -> None
try:
user_profile = get_user_profile_by_id(user_profile_id)
if not (receives_offline_notifications(user_profile) or receives_online_notifications(user_profile)):
return
umessage = UserMessage.objects.get(user_profile=user_profile,
message__id=missed_message['message_id'])
message = umessage.message
if umessage.flags.read:
return
sender_str = message.sender.full_name
apple = num_push_devices_for_user(user_profile, kind=PushDeviceToken.APNS)
android = num_push_devices_for_user(user_profile, kind=PushDeviceToken.GCM)
if apple or android:
# TODO: set badge count in a better way
# Determine what alert string to display based on the missed messages
if message.recipient.type == Recipient.HUDDLE:
alert = "New private group message from %s" % (sender_str,)
elif message.recipient.type == Recipient.PERSONAL:
alert = "New private message from %s" % (sender_str,)
elif message.recipient.type == Recipient.STREAM:
alert = "New mention from %s" % (sender_str,)
else:
alert = "New Zulip mentions and private messages from %s" % (sender_str,)
if apple:
apple_extra_data = {'message_ids': [message.id]}
send_apple_push_notification(user_profile, alert, badge=1, zulip=apple_extra_data)
if android:
content = message.content
content_truncated = (len(content) > 200)
if content_truncated:
content = content[:200] + "..."
android_data = {
'user': user_profile.email,
'event': 'message',
'alert': alert,
'zulip_message_id': message.id, # message_id is reserved for CCS
'time': datetime_to_timestamp(message.pub_date),
'content': content,
'content_truncated': content_truncated,
'sender_email': message.sender.email,
'sender_full_name': message.sender.full_name,
'sender_avatar_url': get_avatar_url(message.sender.avatar_source, message.sender.email),
}
if message.recipient.type == Recipient.STREAM:
android_data['recipient_type'] = "stream"
android_data['stream'] = get_display_recipient(message.recipient)
android_data['topic'] = message.subject
elif message.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL):
android_data['recipient_type'] = "private"
send_android_push_notification(user_profile, android_data)
except UserMessage.DoesNotExist:
logging.error("Could not find UserMessage with message_id %s" % (missed_message['message_id'],))
def is_inactive(email):
# type: (Text) -> None
try:
if get_user_profile_by_email(email).is_active:
raise ValidationError(u'%s is already active' % (email,))
except UserProfile.DoesNotExist:
pass
def user_email_is_unique(email):
# type: (Text) -> None
try:
get_user_profile_by_email(email)
raise ValidationError(u'%s is already registered' % (email,))
except UserProfile.DoesNotExist:
pass
def do_invite_users(user_profile, invitee_emails, streams):
# type: (UserProfile, SizedTextIterable, Iterable[Stream]) -> Tuple[Optional[str], Dict[str, Union[List[Tuple[Text, str]], bool]]]
validated_emails = [] # type: List[Text]
errors = [] # type: List[Tuple[Text, str]]
skipped = [] # type: List[Tuple[Text, str]]
ret_error = None # type: Optional[str]
ret_error_data = {} # type: Dict[str, Union[List[Tuple[Text, str]], bool]]
for email in invitee_emails:
if email == '':
continue
try:
validators.validate_email(email)
except ValidationError:
errors.append((email, _("Invalid address.")))
continue
if not email_allowed_for_realm(email, user_profile.realm):
errors.append((email, _("Outside your domain.")))
continue
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
skipped.append((email, _("Already has an account.")))
continue
validated_emails.append(email)
if errors:
ret_error = _("Some emails did not validate, so we didn't send any invitations.")
ret_error_data = {'errors': errors + skipped, 'sent_invitations': False}
return ret_error, ret_error_data
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
ret_error = _("We weren't able to invite anyone.")
ret_error_data = {'errors': skipped, 'sent_invitations': False}
return ret_error, ret_error_data
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile)
# We save twice because you cannot associate a ManyToMany field
# on an unsaved object.
prereg_user.save()
prereg_user.streams = streams
prereg_user.save()
event = {"email": prereg_user.email, "referrer_email": user_profile.email}
queue_json_publish("invites", event,
lambda event: do_send_confirmation_email(prereg_user, user_profile))
if skipped:
ret_error = _("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!")
ret_error_data = {'errors': skipped, 'sent_invitations': True}
return ret_error, ret_error_data
def send_referral_event(user_profile):
# type: (UserProfile) -> None
event = dict(type="referral",
referrals=dict(granted=user_profile.invites_granted,
used=user_profile.invites_used))
send_event(event, [user_profile.id])
def do_refer_friend(user_profile, email):
# type: (UserProfile, Text) -> None
content = ('Referrer: "%s" <%s>\n'
'Realm: %s\n'
'Referred: %s') % (user_profile.full_name, user_profile.email,
user_profile.realm.domain, email)
subject = "Zulip referral: %s" % (email,)
from_email = '"%s" <%s>' % (user_profile.full_name, 'referrals@zulip.com')
to_email = '"Zulip Referrals" <zulip+referrals@zulip.com>'
headers = {'Reply-To': '"%s" <%s>' % (user_profile.full_name, user_profile.email,)}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
referral = Referral(user_profile=user_profile, email=email)
referral.save()
user_profile.invites_used += 1
user_profile.save(update_fields=['invites_used'])
send_referral_event(user_profile)
def notify_realm_emoji(realm):
# type: (Realm) -> None
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
user_ids = [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
send_event(event, user_ids)
def check_add_realm_emoji(realm, name, img_url, author=None):
# type: (Realm, Text, Text, Optional[UserProfile]) -> None
emoji = RealmEmoji(realm=realm, name=name, img_url=img_url, author=author)
emoji.full_clean()
emoji.save()
notify_realm_emoji(realm)
def do_remove_realm_emoji(realm, name):
# type: (Realm, Text) -> None
RealmEmoji.objects.get(realm=realm, name=name).delete()
notify_realm_emoji(realm)
def notify_alert_words(user_profile, words):
# type: (UserProfile, Iterable[Text]) -> None
event = dict(type="alert_words", alert_words=words)
send_event(event, [user_profile.id])
def do_add_alert_words(user_profile, alert_words):
# type: (UserProfile, Iterable[Text]) -> None
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile, alert_words):
# type: (UserProfile, Iterable[Text]) -> None
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_set_alert_words(user_profile, alert_words):
# type: (UserProfile, List[Text]) -> None
set_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, alert_words)
def do_set_muted_topics(user_profile, muted_topics):
# type: (UserProfile, Union[List[List[Text]], List[Tuple[Text, Text]]]) -> None
user_profile.muted_topics = ujson.dumps(muted_topics)
user_profile.save(update_fields=['muted_topics'])
event = dict(type="muted_topics", muted_topics=muted_topics)
send_event(event, [user_profile.id])
def notify_realm_filters(realm):
# type: (Realm) -> None
realm_filters = realm_filters_for_realm(realm.id)
user_ids = [userdict['id'] for userdict in get_active_user_dicts_in_realm(realm)]
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(event, user_ids)
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
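# Illustrative example (not part of the original module): a filter added as
#   do_add_realm_filter(realm, r"#(?P<id>[0-9]+)",
#                       "https://trac.example.com/ticket/%(id)s")
# would linkify "#1234" in messages to that ticket URL, assuming the usual
# %(group)s substitution applied to realm filter URL format strings.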
def do_add_realm_filter(realm, pattern, url_format_string):
# type: (Realm, Text, Text) -> int
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
def do_remove_realm_filter(realm, pattern=None, id=None):
# type: (Realm, Optional[Text], Optional[int]) -> None
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids):
# type: (Sequence[int]) -> Dict[int, Text]
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def realm_aliases(realm):
# type: (Realm) -> List[Text]
return [alias.domain for alias in realm.realmalias_set.all()]
def get_occupied_streams(realm):
# type: (Realm) -> QuerySet
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm,
user_profile__is_active=True).values('recipient_id')
stream_ids = Recipient.objects.filter(
type=Recipient.STREAM, id__in=subs_filter).values('type_id')
return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
def do_get_streams(user_profile, include_public=True, include_subscribed=True,
include_all_active=False, include_default=False):
# type: (UserProfile, bool, bool, bool, bool) -> List[Dict[str, Any]]
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
    # Listing public streams is disabled for Zephyr mirroring realms.
include_public = include_public and not user_profile.realm.is_zephyr_mirror_realm
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if not include_all_active:
user_subs = Subscription.objects.select_related("recipient").filter(
active=True, user_profile=user_profile,
recipient__type=Recipient.STREAM)
if include_subscribed:
recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
if include_public:
invite_only_check = Q(invite_only=False)
if include_subscribed and include_public:
query = query.filter(recipient_check | invite_only_check)
elif include_public:
query = query.filter(invite_only_check)
elif include_subscribed:
query = query.filter(recipient_check)
else:
# We're including nothing, so don't bother hitting the DB.
query = []
streams = [(row.to_dict()) for row in query]
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def do_claim_attachments(message):
# type: (Message) -> List[Tuple[Text, bool]]
attachment_url_list = attachment_url_re.findall(message.content)
results = []
for url in attachment_url_list:
path_id = attachment_url_to_path_id(url)
user_profile = message.sender
is_message_realm_public = False
if message.recipient.type == Recipient.STREAM:
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if path_id is not None:
is_claimed = claim_attachment(user_profile, path_id, message,
is_message_realm_public)
results.append((path_id, is_claimed))
return results
def do_delete_old_unclaimed_attachments(weeks_ago):
# type: (int) -> None
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(prev_content, message):
# type: (Text, Message) -> None
new_content = message.content
prev_attachments = set(attachment_url_re.findall(prev_content))
new_attachments = set(attachment_url_re.findall(new_content))
to_remove = list(prev_attachments - new_attachments)
path_ids = []
for url in to_remove:
path_id = attachment_url_to_path_id(url)
path_ids.append(path_id)
attachments_to_update = Attachment.objects.filter(path_id__in=path_ids).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message)
|
{
"content_hash": "e2f8d72e5c933589d66efca3749f4bc4",
"timestamp": "",
"source": "github",
"line_count": 3766,
"max_line_length": 223,
"avg_line_length": 44.40706319702602,
"alnum_prop": 0.6227449667238709,
"repo_name": "niftynei/zulip",
"id": "e704702b9fb00f209a29403e86c72c61d2f6d473",
"size": "167237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "253334"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "484619"
},
{
"name": "JavaScript",
"bytes": "1449321"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82787"
},
{
"name": "Python",
"bytes": "3153710"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "37195"
}
],
"symlink_target": ""
}
|
from Graph import *
import Queue
"""
Construct the graph with a series of obstacles that can still be navigated around
Source: http://www.redblobgames.com/pathfinding/a-star/introduction.html
"""
xSize = 5
ySize = 10
obstacles = [(0,1), (2,4), (2,5),
(2,6), (4,2), (4,3),
(2,0), (2,1), (2,2),
(2,3)]
graph = Graph(xSize, ySize, obstacles)
higherCostingNodes = [(3,5), (3,6)]
for node in higherCostingNodes:
graph.setCost(node, 5)
"""
Breadth first search with Dijkstra's Algorithm
Uses the flood fill mapping we've done previously but
now calculates the shortest path between the startNode and goalNode
This implementation allows us to assign cost to each node
so you can define which areas are more or less efficient to walk through
Dijkstra's algorithm is then used to work out the optimal path through the nodes
"""
startNode = (0,2)
goalNode = (4,1)
frontier = Queue.PriorityQueue()
frontier.put((0, startNode)) # Queue entries are (priority, node) tuples so the queue orders by cost
came_from = {}
cost_so_far = {}
came_from[startNode] = None #Python version of "null"
cost_so_far[startNode] = 0
# Construct a map of all possible paths for the startNode across the map
while not frontier.empty():
    current = frontier.get()[1] # Dequeue the lowest-cost (priority, node) entry and keep the node
for neighbour in graph.getNeighbours(current):
new_cost = cost_so_far[current] + graph.getCost(neighbour)
if neighbour not in cost_so_far or new_cost < cost_so_far[neighbour]:
cost_so_far[neighbour] = new_cost
priority = new_cost
            frontier.put((priority, neighbour))
came_from[neighbour] = current
# Create the path between the startNode and goalNode
currentNode = goalNode
path = [currentNode]
while currentNode != startNode:
currentNode = came_from[currentNode]
path.append(currentNode)
# Output the resulting path graphically to the command line
resultingGrid = "\n"
for x in range(xSize):
for y in range(ySize):
if (x,y) in obstacles:
resultingGrid += " # "
elif (x,y) == startNode:
resultingGrid += " S "
elif (x,y) == goalNode:
resultingGrid += " G "
elif (x,y) in path:
resultingGrid += "---"
elif (x,y) in higherCostingNodes:
resultingGrid += "..."
else:
resultingGrid += " . "
resultingGrid +="\n"
print resultingGrid
|
{
"content_hash": "b77e506e1374ea190718b98a4f949dad",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 83,
"avg_line_length": 25.488888888888887,
"alnum_prop": 0.6756756756756757,
"repo_name": "lukemerrett/PathFindingInPython",
"id": "97ab22de3a1daa327ff238cac99cfd0578c6a680",
"size": "2294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BreadthFirstWithCost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9402"
}
],
"symlink_target": ""
}
|
"""Reproduce an Light state."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
import logging
from types import MappingProxyType
from typing import Any, cast
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, HomeAssistant, State
from . import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_MODE,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_TRANSITION,
ATTR_WHITE,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_UNKNOWN,
COLOR_MODE_WHITE,
COLOR_MODE_XY,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
ATTR_GROUP = [
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_WHITE_VALUE,
ATTR_TRANSITION,
]
COLOR_GROUP = [
ATTR_HS_COLOR,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_XY_COLOR,
# The following color attributes are deprecated
ATTR_PROFILE,
ATTR_COLOR_NAME,
ATTR_KELVIN,
]
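# Maps each color mode to (turn_on service parameter, state attribute to read
# it from); note that COLOR_MODE_WHITE sends the saved brightness as the
# "white" parameter.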
COLOR_MODE_TO_ATTRIBUTE = {
COLOR_MODE_COLOR_TEMP: (ATTR_COLOR_TEMP, ATTR_COLOR_TEMP),
COLOR_MODE_HS: (ATTR_HS_COLOR, ATTR_HS_COLOR),
COLOR_MODE_RGB: (ATTR_RGB_COLOR, ATTR_RGB_COLOR),
COLOR_MODE_RGBW: (ATTR_RGBW_COLOR, ATTR_RGBW_COLOR),
COLOR_MODE_RGBWW: (ATTR_RGBWW_COLOR, ATTR_RGBWW_COLOR),
COLOR_MODE_WHITE: (ATTR_WHITE, ATTR_BRIGHTNESS),
COLOR_MODE_XY: (ATTR_XY_COLOR, ATTR_XY_COLOR),
}
DEPRECATED_GROUP = [
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_FLASH,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_TRANSITION,
]
DEPRECATION_WARNING = (
"The use of other attributes than device state attributes is deprecated and will be removed in a future release. "
"Invalid attributes are %s. Read the logs for further details: https://www.home-assistant.io/integrations/scene/"
)
def _color_mode_same(cur_state: State, state: State) -> bool:
"""Test if color_mode is same."""
cur_color_mode = cur_state.attributes.get(ATTR_COLOR_MODE, COLOR_MODE_UNKNOWN)
saved_color_mode = state.attributes.get(ATTR_COLOR_MODE, COLOR_MODE_UNKNOWN)
    # Guard for scenes etc. which were created before color modes were introduced
if saved_color_mode == COLOR_MODE_UNKNOWN:
return True
return cast(bool, cur_color_mode == saved_color_mode)
async def _async_reproduce_state(
hass: HomeAssistant,
state: State,
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Warn if deprecated attributes are used
deprecated_attrs = [attr for attr in state.attributes if attr in DEPRECATED_GROUP]
if deprecated_attrs:
_LOGGER.warning(DEPRECATION_WARNING, deprecated_attrs)
# Return if we are already at the right state.
if (
cur_state.state == state.state
and _color_mode_same(cur_state, state)
and all(
check_attr_equal(cur_state.attributes, state.attributes, attr)
for attr in ATTR_GROUP + COLOR_GROUP
)
):
return
service_data: dict[str, Any] = {ATTR_ENTITY_ID: state.entity_id}
if reproduce_options is not None and ATTR_TRANSITION in reproduce_options:
service_data[ATTR_TRANSITION] = reproduce_options[ATTR_TRANSITION]
if state.state == STATE_ON:
service = SERVICE_TURN_ON
for attr in ATTR_GROUP:
# All attributes that are not colors
if attr in state.attributes:
service_data[attr] = state.attributes[attr]
if (
state.attributes.get(ATTR_COLOR_MODE, COLOR_MODE_UNKNOWN)
!= COLOR_MODE_UNKNOWN
):
# Remove deprecated white value if we got a valid color mode
service_data.pop(ATTR_WHITE_VALUE, None)
color_mode = state.attributes[ATTR_COLOR_MODE]
if parameter_state := COLOR_MODE_TO_ATTRIBUTE.get(color_mode):
parameter, state_attr = parameter_state
if state_attr not in state.attributes:
_LOGGER.warning(
"Color mode %s specified but attribute %s missing for: %s",
color_mode,
state_attr,
state.entity_id,
)
return
service_data[parameter] = state.attributes[state_attr]
else:
            # Fall back to choosing the first color that is specified
for color_attr in COLOR_GROUP:
if color_attr in state.attributes:
service_data[color_attr] = state.attributes[color_attr]
break
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistant,
states: Iterable[State],
*,
context: Context | None = None,
reproduce_options: dict[str, Any] | None = None,
) -> None:
"""Reproduce Light states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
def check_attr_equal(
attr1: MappingProxyType, attr2: MappingProxyType, attr_str: str
) -> bool:
"""Return true if the given attributes are equal."""
return attr1.get(attr_str) == attr2.get(attr_str)
|
{
"content_hash": "79c106b83152288609fcc54f70c20396",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 118,
"avg_line_length": 29.31924882629108,
"alnum_prop": 0.6257806244995997,
"repo_name": "FreekingDean/home-assistant",
"id": "77e5742bbab53462030e7d6fce21a5fe67455bb0",
"size": "6245",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/light/reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2335"
},
{
"name": "Python",
"bytes": "36746639"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import itertools
import json
import random
import sys
import time
import glanceclient
import glanceclient.exc
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
from nova import exception
import nova.image.download as image_xfers
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
glance_opts = [
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance hostname or IP address'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port'),
cfg.StrOpt('glance_protocol',
default='http',
help='Default protocol to use when connecting to glance. '
'Set to https for SSL.'),
cfg.ListOpt('glance_api_servers',
default=['$glance_host:$glance_port'],
help='A list of the glance api servers available to nova. '
'Prefix with https:// for ssl-based glance api servers. '
'([hostname|ip]:port)'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number of retries when downloading an image from glance'),
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url scheme that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.import_opt('auth_strategy', 'nova.api.auth')
CONF.import_opt('my_ip', 'nova.netconf')
def generate_glance_url():
"""Generate the URL to glance."""
glance_host = CONF.glance_host
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
return "%s://%s:%d" % (CONF.glance_protocol, glance_host,
CONF.glance_port)
def generate_image_url(image_ref):
"""Generate an image URL from an image_ref."""
return "%s/images/%s" % (generate_glance_url(), image_ref)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
    :returns: a tuple of the form (image_id, host, port, use_ssl)
:raises ValueError
"""
o = urlparse.urlparse(image_href)
port = o.port or 80
host = o.netloc.rsplit(':', 1)[0]
image_id = o.path.split('/')[-1]
use_ssl = (o.scheme == 'https')
return (image_id, host, port, use_ssl)
def generate_identity_headers(context, status='Confirmed'):
return {
'X-Auth-Token': getattr(context, 'auth_token', None),
'X-User-Id': getattr(context, 'user', None),
'X-Tenant-Id': getattr(context, 'tenant', None),
'X-Roles': ','.join(context.roles),
'X-Identity-Status': status,
'X-Service-Catalog': json.dumps(context.service_catalog),
}
def _create_glance_client(context, host, port, use_ssl, version=1):
"""Instantiate a new glanceclient.Client object."""
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_compression'] = False
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
# NOTE(isethi): Glanceclient <= 0.9.0.49 accepts only
# keyword 'token', but later versions accept both the
# header 'X-Auth-Token' and 'token'
params['token'] = context.auth_token
params['identity_headers'] = generate_identity_headers(context)
if utils.is_valid_ipv6(host):
        # If so, it is an IPv6 address and needs to be wrapped in '[]'
host = '[%s]' % host
endpoint = '%s://%s:%s' % (scheme, host, port)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers():
"""Shuffle a list of CONF.glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
for api_server in CONF.glance_api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
o = urlparse.urlparse(api_server)
port = o.port or 80
host = o.netloc.rsplit(':', 1)[0]
if host[0] == '[' and host[-1] == ']':
host = host[1:-1]
use_ssl = (o.scheme == 'https')
api_servers.append((host, port, use_ssl))
random.shuffle(api_servers)
return itertools.cycle(api_servers)
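# NOTE: rough consumption sketch (not in the original source). Assuming the
# hypothetical setting glance_api_servers=['g1:9292', 'https://g2:9292'],
# each .next() call on the returned iterator yields one (host, port, use_ssl)
# tuple and wraps around after the last one; the starting order is
# randomized by the shuffle above:
#     servers = get_api_servers()
#     servers.next()   # e.g. ('g1', 9292, False)
#     servers.next()   # e.g. ('g2', 9292, True)
#     servers.next()   # cycles back to ('g1', 9292, False)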
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, host=None, port=None, use_ssl=False,
version=1):
if host is not None:
self.client = self._create_static_client(context,
host, port,
use_ssl, version)
else:
self.client = None
self.api_servers = None
def _create_static_client(self, context, host, port, use_ssl, version):
"""Create a client that we'll use for every call."""
self.host = host
self.port = port
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.host, self.port,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.host, self.port, self.use_ssl = self.api_servers.next()
return _create_glance_client(context,
self.host, self.port,
self.use_ssl, version)
def call(self, context, version, method, *args, **kwargs):
"""Call a glance client method. If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
return getattr(client.images, method)(*args, **kwargs)
except retry_excs as e:
host = self.host
port = self.port
extra = "retrying"
error_msg = (_("Error contacting glance server "
"'%(host)s:%(port)s' for '%(method)s', "
"%(extra)s.") %
{'host': host, 'port': port,
'method': method, 'extra': extra})
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg)
raise exception.GlanceConnectionFailed(
host=host, port=port, reason=str(e))
LOG.exception(error_msg)
time.sleep(1)
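# NOTE: minimal usage sketch (not part of the upstream file). A wrapper
# created without an explicit host draws endpoints from
# CONF.glance_api_servers and retries connection failures up to
# CONF.glance_num_retries extra times; 'context' and 'image_id' are assumed
# to come from the caller, exactly as in GlanceImageService below:
#     client = GlanceClientWrapper()
#     image = client.call(context, 1, 'get', image_id)   # v1 images.get()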
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
#NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
# may require configuration options to be parsed.
self._download_handlers = {}
download_modules = image_xfers.load_transfer_modules()
for scheme, mod in download_modules.iteritems():
if scheme not in CONF.allowed_direct_url_schemes:
continue
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
fmt = _('When loading the module %(module_str)s the '
'following error occurred: %(ex)s')
LOG.error(fmt % {'module_str': str(mod), 'ex': ex})
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = _extract_query_params(kwargs)
try:
images = self._client.call(context, 1, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if _is_image_available(context, image):
_images.append(_translate_from_glance(image))
return _images
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 1, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
base_image_meta = _translate_from_glance(image)
return base_image_meta
def _get_transfer_module(self, scheme):
try:
return self._download_handlers[scheme]
except KeyError:
return None
except Exception as ex:
LOG.error(_("Failed to instantiate the download handler "
"for %(scheme)s") % {'scheme': scheme})
return
def download(self, context, image_id, data=None, dst_path=None):
"""Calls out to Glance for data and writes data."""
if CONF.allowed_direct_url_schemes and dst_path is not None:
locations = _get_locations(self._client, context, image_id)
for entry in locations:
loc_url = entry['url']
loc_meta = entry['metadata']
o = urlparse.urlparse(loc_url)
xfer_mod = self._get_transfer_module(o.scheme)
if xfer_mod:
try:
xfer_mod.download(context, o, dst_path, loc_meta)
msg = _("Successfully transferred "
"using %s") % o.scheme
LOG.info(msg)
return
except Exception as ex:
LOG.exception(ex)
try:
image_chunks = self._client.call(context, 1, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
close_file = False
if data is None and dst_path:
data = open(dst_path, 'wb')
close_file = True
if data is None:
return image_chunks
else:
try:
for chunk in image_chunks:
data.write(chunk)
finally:
if close_file:
data.close()
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = _translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
try:
recv_service_image_meta = self._client.call(
context, 1, 'create', **sent_service_image_meta)
except glanceclient.exc.HTTPException:
_reraise_translated_exception()
return _translate_from_glance(recv_service_image_meta)
def update(self, context, image_id, image_meta, data=None,
purge_props=True):
"""Modify the given image with the new data."""
image_meta = _translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
#NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
image_meta = self._client.call(context, 1, 'update',
image_id, **image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return _translate_from_glance(image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
:raises: ImageNotAuthorized if the user is not authorized.
"""
try:
self._client.call(context, 1, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
except glanceclient.exc.HTTPForbidden:
raise exception.ImageNotAuthorized(image_id=image_id)
return True
def _get_locations(client, context, image_id):
    """Return the list of storage locations known for an image.
    The result combines the image's 'locations' attribute with its
    'direct_url' (when Glance exposes either); it is an empty list, not
    None, if neither attribute is shown by Glance.
    """
try:
image_meta = client.call(context, 2, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not _is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
locations = getattr(image_meta, 'locations', [])
du = getattr(image_meta, 'direct_url', None)
if du:
locations.append({'url': du, 'metadata': {}})
return locations
def _extract_query_params(params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'page_size', 'sort_key', 'sort_dir')
for param in accepted_params:
if params.get(param):
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
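# NOTE: illustrative input/output for _extract_query_params() (values are
# assumptions, not taken from the original module). Unknown keys and falsy
# values are dropped, and an is_public filter is always injected so private
# images are not filtered out:
#     _extract_query_params({'limit': 10, 'marker': None, 'junk': 1})
#     # -> {'limit': 10, 'filters': {'is_public': 'none'}}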
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
def _is_image_public(image):
# NOTE(jaypipes) V2 Glance API replaced the is_public attribute
# with a visibility attribute. We do this here to prevent the
# glanceclient for a V2 image model from throwing an
# exception from warlock when trying to access an is_public
# attribute.
if hasattr(image, 'visibility'):
return str(image.visibility).lower() == 'public'
else:
return image.is_public
if context.is_admin or _is_image_public(image):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
return image_meta
def _translate_from_glance(image):
image_meta = _extract_attributes(image)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
prop = properties[attr]
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
prop = properties[attr]
if not isinstance(prop, six.string_types):
properties[attr] = jsonutils.dumps(prop)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
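# NOTE: hypothetical round trip (not in the original source) showing why the
# two helpers above exist -- glance stores properties as strings, so
# structured values are JSON-encoded on the way in and decoded on the way
# out:
#     meta = {'properties': {'mappings': [{'device': 'root'}]}}
#     encoded = _convert_to_string(meta)
#     # encoded['properties']['mappings'] == '[{"device": "root"}]'
#     _convert_from_string(encoded)['properties']['mappings']
#     # -> [{'device': 'root'}]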
def _extract_attributes(image):
#NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
    # therefore sorted, with dependent attributes at the end:
# 'deleted_at' depends on 'deleted'
# 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public']
output = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
else:
output[attr] = getattr(image, attr)
output['properties'] = getattr(image, 'properties', {})
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
"""Transform the exception for the image but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_image_exception(image_id, exc_value)
raise new_exc, None, exc_trace
def _reraise_translated_exception():
"""Transform the exception but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_plain_exception(exc_value)
raise new_exc, None, exc_trace
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(unicode(exc_value))
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.NotAuthorized(unicode(exc_value))
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(unicode(exc_value))
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(unicode(exc_value))
return exc_value
def get_remote_image_service(context, image_href):
"""Create an image_service and parse the id from the given image_href.
The image_href param can be an href of the form
'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
image_href is a standalone id, then the default image service is returned.
:param image_href: href that describes the location of an image
:returns: a tuple of the form (image_service, image_id)
"""
    #NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
# standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
return image_service, image_href
try:
(image_id, glance_host, glance_port, use_ssl) = \
_parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
host=glance_host, port=glance_port, use_ssl=use_ssl)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
image_service = GlanceImageService(client=glance_client)
return image_service, image_id
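# NOTE: sketch of the two accepted image_href forms (the hostname is an
# assumption; 'ctxt' stands in for a request context):
#     get_remote_image_service(ctxt, 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')
#     # -> (GlanceImageService using the default API servers, the same id)
#     get_remote_image_service(ctxt, 'http://glance.example.com:9292/v1/'
#                                    'images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')
#     # -> (GlanceImageService pinned to glance.example.com:9292, the image id)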
def get_default_image_service():
return GlanceImageService()
class UpdateGlanceImage(object):
def __init__(self, context, image_id, metadata, stream):
self.context = context
self.image_id = image_id
self.metadata = metadata
self.image_stream = stream
def start(self):
image_service, image_id = (
get_remote_image_service(self.context, self.image_id))
image_service.update(self.context, image_id, self.metadata,
self.image_stream, purge_props=False)
|
{
"content_hash": "39bdc212b2f02a1969f76d5ace0d70f6",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 79,
"avg_line_length": 36.287581699346404,
"alnum_prop": 0.5991084293948127,
"repo_name": "nkrinner/nova",
"id": "cb3bd6585845dd56829831878b703a70448d2282",
"size": "22844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/image/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import splunktaforpuppetenterprise_declare
import os
import sys
import time
import datetime
import json
import modinput_wrapper.base_modinput
from solnlib.packages.splunklib import modularinput as smi
import input_module_puppet_enterprise_overview_enforcement as input_module
bin_dir = os.path.basename(__file__)
'''
Do not edit this file!!!
This file is generated by Add-on builder automatically.
Add your modular input logic to file input_module_puppet_enterprise_overview_enforcement.py
'''
class ModInputpuppet_enterprise_overview_enforcement(modinput_wrapper.base_modinput.BaseModInput):
def __init__(self):
if 'use_single_instance_mode' in dir(input_module):
use_single_instance = input_module.use_single_instance_mode()
else:
use_single_instance = False
super(ModInputpuppet_enterprise_overview_enforcement, self).__init__("splunktaforpuppetenterprise", "puppet_enterprise_overview_enforcement", use_single_instance)
self.global_checkbox_fields = None
def get_scheme(self):
"""overloaded splunklib modularinput method"""
scheme = super(ModInputpuppet_enterprise_overview_enforcement, self).get_scheme()
scheme.title = ("Puppet Enterprise Overview Enforcement")
scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.")
scheme.use_external_validation = True
scheme.streaming_mode_xml = True
scheme.add_argument(smi.Argument("name", title="Name",
description="",
required_on_create=True))
"""
For customized inputs, hard code the arguments here to hide argument detail from users.
For other input types, arguments should be get from input_module. Defining new input types could be easier.
"""
scheme.add_argument(smi.Argument("puppet_enterprise_server_", title="Puppet Enterprise Server:",
description="Put in your FQDN of your Puppet Enterprise Server so the links backs on the dashboards work correctly.",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("server_", title="Server:",
description="Input your Puppet Enterprise Server address.",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("token_", title="Token:",
description="curl -k -X POST -H \'Content-Type: application/json\' -d \'{\"login\": \"\", \"password\": \"\",\"lifetime\": \"9y\" }\' https://$:4433/rbac-api/v1/auth/token",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("port_", title="Port:",
description="Input your Puppet Enterprise DB Port (HTTPS 8081, HTTP: 8080)",
required_on_create=True,
required_on_edit=False))
return scheme
def get_app_name(self):
return "SplunkTAforPuppetEnterprise"
def validate_input(self, definition):
"""validate the input stanza"""
input_module.validate_input(self, definition)
def collect_events(self, ew):
"""write out the events"""
input_module.collect_events(self, ew)
def get_account_fields(self):
account_fields = []
return account_fields
def get_checkbox_fields(self):
checkbox_fields = []
return checkbox_fields
def get_global_checkbox_fields(self):
if self.global_checkbox_fields is None:
checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json')
try:
if os.path.isfile(checkbox_name_file):
with open(checkbox_name_file, 'r') as fp:
self.global_checkbox_fields = json.load(fp)
else:
self.global_checkbox_fields = []
except Exception as e:
self.log_error('Get exception when loading global checkbox parameter names. ' + str(e))
self.global_checkbox_fields = []
return self.global_checkbox_fields
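# Illustrative sketch (not produced by Add-on Builder itself): the helper
# above expects bin/global_checkbox_param.json to hold a JSON array of
# global checkbox parameter names, e.g.
#     ["disable_ssl_verification", "use_proxy"]
# The names shown are assumptions for illustration only.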
if __name__ == "__main__":
exitcode = ModInputpuppet_enterprise_overview_enforcement().run(sys.argv)
sys.exit(exitcode)
|
{
"content_hash": "ad2f6288394c476a7fef80252d2d082c",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 214,
"avg_line_length": 45.970588235294116,
"alnum_prop": 0.5941565365749627,
"repo_name": "domeger/SplunkTAforPuppetEnterprise",
"id": "ef71982e340b7227acb8795c2cd543cce7a68ce1",
"size": "4689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/puppet_enterprise_overview_enforcement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "4504"
},
{
"name": "HTML",
"bytes": "5156"
},
{
"name": "Python",
"bytes": "5659367"
}
],
"symlink_target": ""
}
|
import logging
# Dagda logger class
class DagdaLogger(logging.Logger):
# -- Init
logging.basicConfig(format='<%(asctime)s> <%(levelname)s> <DagdaServer> <%(module)s> <%(funcName)s:%(lineno)d> ' +
'<%(message)s>')
_logger = logging.getLogger('DagdaLogger')
_logger.setLevel('DEBUG')
# -- Static methods
@staticmethod
def get_logger():
return DagdaLogger._logger
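# Minimal usage sketch (not part of the original file): callers fetch the
# shared logger and emit records through it, e.g.
#     DagdaLogger.get_logger().debug('analysis started')
# The import path 'from log.dagda_logger import DagdaLogger' is an
# assumption based on this file living at dagda/log/dagda_logger.py.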
|
{
"content_hash": "9e0c2f56c954025f0fb856d5c84c60a5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 118,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.5944700460829493,
"repo_name": "eliasgranderubio/dagda",
"id": "53d5527e21aea038f90018657db3a989ff436e78",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dagda/log/dagda_logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "233"
},
{
"name": "Makefile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "400625"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
}
|
"""
Tests that test the value of individual items
"""
from unittest import TestCase
import validictory
class TestEnum(TestCase):
schema = {"enum": ["test", True, 123, ["???"]]}
schema2 = {"enum": ("test", True, 123, ["???"])}
def test_enum_pass(self):
data = ["test", True, 123, ["???"]]
try:
for item in data:
validictory.validate(item, self.schema)
validictory.validate(item, self.schema2)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_enum_fail(self):
data = "unknown"
self.assertRaises(ValueError, validictory.validate, data, self.schema)
class TestPattern(TestCase):
# match simplified regular expression for an e-mail address
schema = {"pattern":
"^[A-Za-z0-9][A-Za-z0-9\.]*@([A-Za-z0-9]+\.)+[A-Za-z0-9]+$"}
def test_pattern_pass(self):
data = "my.email01@gmail.com"
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_pattern_pass_nonstring(self):
data = 123
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_pattern_fail(self):
data = "whatever"
self.assertRaises(ValueError, validictory.validate, data, self.schema)
def validate_format_contains_spaces(validator, fieldname, value,
format_option):
if ' ' in value:
return
    raise validictory.FieldValidationError(
        "Value %(value)r of field '%(fieldname)s' does not contain any "
        "spaces, but it should" % locals(), fieldname, value)
class TestFormat(TestCase):
schema_datetime = {"format": "date-time"}
schema_date = {"format": "date"}
schema_time = {"format": "time"}
schema_utcmillisec = {"format": "utc-millisec"}
schema_ip = {"format": "ip-address"}
schema_spaces = {"format": "spaces"}
def test_format_datetime_pass(self):
data = "2011-01-13T10:56:53Z"
try:
validictory.validate(data, self.schema_datetime)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_date_pass(self):
data = "2011-01-13"
try:
validictory.validate(data, self.schema_date)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_time_pass(self):
data = "10:56:53"
try:
validictory.validate(data, self.schema_time)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_utcmillisec_pass(self):
try:
validictory.validate(1294915735, self.schema_utcmillisec)
validictory.validate(1294915735.0, self.schema_utcmillisec)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_datetime_nonexisting_day_fail(self):
data = "2013-13-13T00:00:00Z"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_datetime)
def test_format_datetime_feb29_fail(self):
data = "2011-02-29T00:00:00Z"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_datetime)
def test_format_datetime_notutc_fail(self):
data = "2011-01-13T10:56:53+01: 00"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_datetime)
def test_format_datetime_fail(self):
data = "whatever"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_datetime)
def test_format_datetime_bad_type(self):
data = 3
self.assertRaises(ValueError, validictory.validate, data,
self.schema_datetime)
def test_format_date_fail(self):
data = "whatever"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_date)
def test_format_time_fail(self):
data = "whatever"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_time)
def test_format_utcmillisec_fail(self):
data = "whatever"
self.assertRaises(ValueError, validictory.validate, data,
self.schema_utcmillisec)
def test_format_utcmillisec_negative_fail(self):
data = -1
self.assertRaises(ValueError, validictory.validate, data,
self.schema_utcmillisec)
def test_format_ip_pass(self):
valids = ["0.0.0.0", "255.255.255.255"]
for ip in valids:
try:
validictory.validate(ip, self.schema_ip)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_ip_fail(self):
invalids = [1.2, "bad", {"test": "blah"}, [32, 49], 1284, True,
"-0.-0.-0.-0", "-1.-1.-1.-1", "256.256.256.256"]
for ip in invalids:
self.assertRaises(ValueError, validictory.validate, ip,
self.schema_ip)
def test_format_required_false(self):
schema = {
'type': 'object',
'properties': {
'startdate': {'type': 'string', 'format': 'date-time',
'required': False}
}
}
try:
validictory.validate({}, schema, required_by_default=False)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_custom_unregistered_pass(self):
data = 'No-spaces-here'
try:
# no custom validator installed, so no error
validictory.validate(data, self.schema_spaces)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_custom_instantiated_pass(self):
data = 'Here are spaces'
validator = validictory.SchemaValidator(
{'spaces': validate_format_contains_spaces})
try:
# validator installed, but data validates
validator.validate(data, self.schema_spaces)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_custom_registered_pass(self):
data = 'Here are spaces'
validator = validictory.SchemaValidator()
validator.register_format_validator('spaces',
validate_format_contains_spaces)
try:
# validator registered, but data validates
validator.validate(data, self.schema_spaces)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_format_custom_registered_fail(self):
data = 'No-spaces-here'
validator = validictory.SchemaValidator(
{'spaces': validate_format_contains_spaces})
# validator registered, but data does not conform
self.assertRaises(ValueError, validator.validate, data,
self.schema_spaces)
class TestUniqueItems(TestCase):
schema = {"uniqueItems": True}
schema_false = {"uniqueItems": False}
def test_uniqueitems_pass(self):
data = [1, 2, 3]
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_pass_string(self):
data = ['1', '2', '3']
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_pass_nested_array(self):
'''
uniqueItems only applies for the array it was specified on and not to
all datastructures nested within.
'''
data = [[1, [5, 5]], [2, [5, 5]]]
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_pass_not_an_array(self):
data = 13 # it's pretty unique
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_pass_different_types(self):
data = [1, "1"]
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_false_pass(self):
data = [1, 1, 1]
try:
validictory.validate(data, self.schema_false)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_uniqueitems_fail(self):
data = [1, 1, 1]
self.assertRaises(ValueError, validictory.validate, data, self.schema)
def test_uniqueitems_fail_nested_arrays(self):
data = [[1, 2, 3], [1, 2, 3]]
self.assertRaises(ValueError, validictory.validate, data, self.schema)
def test_uniqueitems_fail_nested_objects(self):
data = [{'one': 1, 'two': 2}, {'one': 1, 'two': 2}]
self.assertRaises(ValueError, validictory.validate, data, self.schema)
def test_uniqueitems_fail_null(self):
data = [None, None]
self.assertRaises(ValueError, validictory.validate, data, self.schema)
class TestMaximum(TestCase):
props = {
"prop01": {"type": "number", "maximum": 10},
"prop02": {"type": "integer", "maximum": 20}
}
props_exclusive = {
"prop": {"type": "integer", "maximum": 20, "exclusiveMaximum": True},
}
schema = {"type": "object", "properties": props}
schema_exclusive = {"type": "object", "properties": props_exclusive}
def test_maximum_pass(self):
# Test less than
data1 = {"prop01": 5, "prop02": 10}
# Test equal
data2 = {"prop01": 10, "prop02": 20}
try:
validictory.validate(data1, self.schema)
validictory.validate(data2, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_maximum_exclusive_pass(self):
# Test less than
data = {"prop": 19}
try:
validictory.validate(data, self.schema_exclusive)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_maximum_fail(self):
# Test number
data1 = {"prop01": 11, "prop02": 19}
# Test integer
data2 = {"prop01": 9, "prop02": 21}
self.assertRaises(ValueError, validictory.validate, data1, self.schema)
self.assertRaises(ValueError, validictory.validate, data2, self.schema)
def test_maximum_exclusive_fail(self):
# Test equal
data = {"prop": 20}
self.assertRaises(ValueError, validictory.validate, data,
self.schema_exclusive)
class TestMinimum(TestCase):
props = {
"prop01": {"type": "number", "minimum": 10},
"prop02": {"type": "integer", "minimum": 20}
}
props_exclusive = {
"prop": {"type": "integer", "minimum": 20, "exclusiveMinimum": True},
}
schema = {"type": "object", "properties": props}
schema_exclusive = {"type": "object", "properties": props_exclusive}
def test_minimum_pass(self):
# Test greater than
data1 = {"prop01": 21, "prop02": 21}
# Test equal
data2 = {"prop01": 10, "prop02": 20}
try:
validictory.validate(data1, self.schema)
validictory.validate(data2, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_minimum_exclusive_pass(self):
# Test greater than
data = {"prop": 21}
try:
validictory.validate(data, self.schema_exclusive)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_minimum_fail(self):
# Test number
data1 = {"prop01": 9, "prop02": 21}
# Test integer
data2 = {"prop01": 10, "prop02": 19}
self.assertRaises(ValueError, validictory.validate, data1, self.schema)
self.assertRaises(ValueError, validictory.validate, data2, self.schema)
def test_minimum_exclusive_fail(self):
# Test equal
data = {"prop": 20}
self.assertRaises(ValueError, validictory.validate, data,
self.schema_exclusive)
class TestMinLength(TestCase):
schema = {"minLength": 4}
def test_minLength_pass(self):
# str-equal, str-gt, list-equal, list-gt
data = ['test', 'string', [1, 2, 3, 4], [0, 0, 0, 0, 0]]
try:
for item in data:
validictory.validate(item, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_minLength_pass_nonstring(self):
# test when data is not a string
data1 = 123
try:
validictory.validate(data1, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_minLength_fail(self):
        # test strings and lists shorter than minLength
        data = ["car", [1, 2, 3]]
        for item in data:
            self.assertRaises(ValueError, validictory.validate, item,
                              self.schema)
class TestMaxLength(TestCase):
schema = {"maxLength": 4}
def test_maxLength_pass(self):
# str-equal, str-lt, list-equal, list-lt
data = ["test", "car", [1, 2, 3, 4], [0, 0, 0]]
try:
for item in data:
validictory.validate(item, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_maxLength_pass_nonstring(self):
# test when data is not a string
data1 = 12345
try:
validictory.validate(data1, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_maxLength_fail(self):
data = ["string", [1, 2, 3, 4, 5]]
for item in data:
self.assertRaises(ValueError, validictory.validate, item,
self.schema)
class TestBlank(TestCase):
def test_blank_default_false(self):
schema = {
"type": "object",
"properties": {
"key": {
"type": "string",
"required": True,
}
}
}
try:
            validictory.validate({"key": "value"}, schema,
                                 blank_by_default=False)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
self.assertRaises(ValueError, validictory.validate, {"key": ""},
schema)
def test_blank_default_true(self):
schema = {
"type": "object",
"properties": {
"key": {
"type": "string",
"required": True,
}
}
}
try:
validictory.validate({"key": ""}, schema, blank_by_default=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_blank_false(self):
schema = {"blank": False}
try:
validictory.validate("test", schema, blank_by_default=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
self.assertRaises(ValueError, validictory.validate, "", schema)
def test_blank_true(self):
try:
validictory.validate("", {"blank": True}, blank_by_default=False)
validictory.validate("test", {"blank": True},
blank_by_default=False)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
class TestDivisibleBy(TestCase):
schema = {'type': 'number', 'divisibleBy': 12}
schema0 = {'type': 'number', 'divisibleBy': 0}
def test_divisibleBy_pass(self):
data = 60
try:
validictory.validate(data, self.schema)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_divisibleBy_fail(self):
data = 13
self.assertRaises(ValueError, validictory.validate, data, self.schema)
def test_divisibleBy_ZeroDivisionError_fail(self):
data = 60
self.assertRaises(ValueError, validictory.validate, data, self.schema0)
|
{
"content_hash": "a62ab81fb13d965d334ac8d26bd4beec",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 79,
"avg_line_length": 31.425891181988742,
"alnum_prop": 0.5636417910447761,
"repo_name": "simon-weber/validictory",
"id": "61dc5c61416dd214dbc67fcc332c371426776e44",
"size": "16750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validictory/tests/test_values.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from warnings import warn
import pandas as pd
from zipline.assets import Asset
from zipline.utils.input_validation import expect_types
from .utils.enum import enum
from zipline._protocol import BarData # noqa
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__.update(initial_values)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
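# NOTE: small illustration (not part of the upstream module) of the Event
# container; the field names below are arbitrary examples:
#     evt = Event({'sid': 24, 'price': 10.5})
#     'price' in evt     # True
#     evt.to_series()    # pandas Series indexed by the field names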
def _deprecated_getitem_method(name, attrs):
"""Create a deprecated ``__getitem__`` method that tells users to use
getattr instead.
Parameters
----------
name : str
The name of the object in the warning message.
attrs : iterable[str]
The set of allowed attributes.
Returns
-------
__getitem__ : callable[any, str]
The ``__getitem__`` method to put in the class dict.
"""
attrs = frozenset(attrs)
msg = (
"'{name}[{attr!r}]' is deprecated, please use"
" '{name}.{attr}' instead"
)
def __getitem__(self, key):
"""``__getitem__`` is deprecated, please use attribute access instead.
"""
warn(msg.format(name=name, attr=key), DeprecationWarning, stacklevel=2)
if key in attrs:
return self.__dict__[key]
raise KeyError(key)
return __getitem__
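# NOTE: hypothetical sketch of the behaviour the factory above produces (the
# Order/Portfolio/Account classes below use it in exactly this way):
#     class Thing(object):
#         __getitem__ = _deprecated_getitem_method('thing', {'foo'})
#     t = Thing()
#     t.foo = 1
#     t['foo']    # returns 1 but emits a DeprecationWarning
#     t['bar']    # warns, then raises KeyError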
class Order(Event):
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'order', {
'dt',
'sid',
'amount',
'stop',
'limit',
'id',
'filled',
'commission',
'stop_reached',
'limit_reached',
'created',
},
)
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'portfolio', {
'capital_used',
'starting_cash',
'portfolio_value',
'pnl',
'returns',
'cash',
'positions',
'start_date',
'positions_value',
},
)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.total_positions_exposure = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __repr__(self):
return "Account({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'account', {
'settled_cash',
'accrued_interest',
'buying_power',
'equity_with_loan',
'total_positions_value',
'total_positions_exposure',
'regt_equity',
'regt_margin',
'initial_margin_requirement',
'maintenance_margin_requirement',
'available_funds',
'excess_liquidity',
'cushion',
'day_trades_remaining',
'leverage',
'net_leverage',
'net_liquidation',
},
)
class Position(object):
@expect_types(asset=Asset)
def __init__(self, asset):
self.asset = asset
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
@property
def sid(self):
# for backwards compatibility
return self.asset
def __repr__(self):
return "Position({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
# Copied from Position and renamed. This is used to handle cases where a user
# does something like `context.portfolio.positions[100]` instead of
# `context.portfolio.positions[sid(100)]`.
class _DeprecatedSidLookupPosition(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
def __repr__(self):
return "_DeprecatedSidLookupPosition({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
class Positions(dict):
def __missing__(self, key):
if isinstance(key, Asset):
return Position(key)
elif isinstance(key, int):
warn("Referencing positions by integer is deprecated."
" Use an asset instead.")
else:
warn("Position lookup expected a value of type Asset but got {0}"
" instead.".format(type(key).__name__))
return _DeprecatedSidLookupPosition(key)
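# NOTE: behavioural sketch (not from the original source) of the lookup
# fallbacks implemented in __missing__ above; 'equity' stands in for any
# zipline Asset instance:
#     positions = Positions()
#     positions[equity]    # fresh Position for that asset
#     positions[100]       # _DeprecatedSidLookupPosition + deprecation warning
#     positions['AAPL']    # _DeprecatedSidLookupPosition + type warning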
|
{
"content_hash": "33aaf6214cb1ab00c6a500373a4331ab",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 27.396491228070175,
"alnum_prop": 0.5642930327868853,
"repo_name": "florentchandelier/zipline",
"id": "5d94434733a11e486637718f39fb9196b713d679",
"size": "8390",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "zipline/protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7014"
},
{
"name": "Dockerfile",
"bytes": "2480"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "162383"
},
{
"name": "PowerShell",
"bytes": "3269"
},
{
"name": "Python",
"bytes": "3677457"
},
{
"name": "Shell",
"bytes": "7420"
}
],
"symlink_target": ""
}
|