prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Here is the source code:\n```python\n#!/usr/bin/env python\n\n\"\"\"@See preprocessed data\n\"\"\"\nfrom numpy import*\nimport matplotlib.pyplot as plt\nfrom GestureModel import*\nfrom Creator import*\nfrom Classifier import*\n\n\n\ndef plotResults(gr_points,gr_sig, b_points,b_sig,name_model):\n ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"@See preprocessed data\n\"\"\"\nfrom numpy import*\nimport matplotlib.pyplot as plt\nfrom GestureModel import*\nfrom Creator import*\nfrom Classifier import*\n\n\n\ndef plotResults(gr_points,gr_sig, b_points,b_sig,... | ```python
#!/usr/bin/env python
"""@See preprocessed data
"""
from numpy import*
import matplotlib.pyplot as plt
from GestureModel import*
from Creator import*
from Classifier import*
def plotResults(gr_points, gr_sig, b_points, b_sig, name_model):
    """Plot the expected GMR curves with their 3-sigma confidence bands.

    For each of the three axes (x, y, z) of both the gravity and body
    components, draws the expected curve surrounded by a shaded band of
    half-width ``3 * sqrtm(Sigma)[axis, axis]`` and saves each figure to
    ``<name_model>_<component>_<axis>_axis.png``.

    @param gr_points: expected gravity curve, one row per GMR point
                      (transposed internally to axes-by-points).
    @param gr_sig: gravity covariances stored as consecutive 3-row blocks.
    @param b_points: expected body curve, same layout as gr_points.
    @param b_sig: body covariances, same layout as gr_sig.
    @param name_model: prefix used for the output image file names.
    """
    from scipy import linalg
    import matplotlib.pyplot as plt

    gr_points = gr_points.transpose()
    b_points = b_points.transpose()

    n, m = gr_points.shape
    # Re-assemble the per-point 3x3 covariance blocks from flat storage.
    gr_sigma = [gr_sig[i * 3:i * 3 + 3] for i in range(m)]
    b_sigma = [b_sig[i * 3:i * 3 + 3] for i in range(m)]

    x = arange(0, m, 1)

    def _plot_band(points, sigmas, axis, fname):
        # One figure per axis: expected curve plus 3-sigma envelope.
        maximum = zeros((m))
        minimum = zeros((m))
        for i in range(m):
            sigma = 3. * linalg.sqrtm(sigmas[i])
            maximum[i] = points[axis, i] + sigma[axis, axis]
            minimum[i] = points[axis, i] - sigma[axis, axis]
        plt.figure()
        plt.fill_between(x, maximum, minimum, lw=2, alpha=0.5)
        plt.plot(x, points[axis])
        plt.savefig(fname)

    _plot_band(gr_points, gr_sigma, 0, name_model + "_gravity_x_axis.png")
    _plot_band(gr_points, gr_sigma, 1, name_model + "_gravity_y_axis.png")
    _plot_band(gr_points, gr_sigma, 2, name_model + "_gravity_z_axis.png")
    _plot_band(b_points, b_sigma, 0, name_model + "_body_x_axis.png")
    # NOTE(review): this file name lacks the axis letter ("_body_axis")
    # unlike all its siblings; kept byte-identical to preserve behavior —
    # confirm whether "_body_y_axis" was intended.
    _plot_band(b_points, b_sigma, 1, name_model + "_body_axis.png")
    _plot_band(b_points, b_sigma, 2, name_model + "_body_z_axis.png")
#NOTE: Add path
def newModel(name, files):
    """Create a gesture model from raw data files and save its GMR output.

    Reads the acceleration data in *files* via Creator, clusters it, runs
    Gaussian Mixture Modelling/Regression for the gravity and body
    components and writes the expected curves and covariances to
    ``<name>Mu{Gravity,Body}.txt`` and ``<name>Sigma{Gravity,Body}.txt``.

    @param name: prefix for the four output text files.
    @param files: list of input data file paths.
    """
    # BUG FIX: 'math' was used below but never imported anywhere in this
    # file ('from numpy import *' does not provide it) — import it locally.
    import math

    g = Creator()
    # Read the data
    g.ReadFiles(files, [])
    g.CreateDatasets_Acc()
    g.ObtainNumberOfCluster()
    gravity = g.gravity
    K_gravity = g.K_gravity
    body = g.body
    K_body = g.K_body
    # 2) define the number of points to be used in GMR
    #    (current settings allow for CONSTANT SPACING only)
    numPoints = amax(gravity[0, :])
    scaling_factor = 10 / 10  # kept as-is: evaluates to 1 (no rescaling)
    numGMRPoints = math.ceil(numPoints * scaling_factor)
    # 3) perform Gaussian Mixture Modelling and Regression to retrieve the
    #    expected curve and associated covariance matrices for each feature
    gr_points, gr_sigma = g.GetExpected(gravity, K_gravity, numGMRPoints)
    b_points, b_sigma = g.GetExpected(body, K_body, numGMRPoints)
    savetxt(name + "MuGravity.txt", gr_points, fmt='%.12f')
    savetxt(name + "SigmaGravity.txt", gr_sigma, fmt='%.12f')
    savetxt(name + "MuBody.txt", b_points, fmt='%.12f')
    savetxt(name + "SigmaBody.txt", b_sigma, fmt='%.12f')
def loadModel(file_name, th=1, plot=True):
    """Build a GestureModel from previously saved GMR text files.

    @param file_name: prefix of the Mu/Sigma text files written by newModel.
    @param th: threshold handed to each sub-model.
    @param plot: when True, also render the confidence-band plots.
    @return: the populated GestureModel instance.
    """
    # Load the expected curves and covariances produced by newModel().
    mu_gravity = loadtxt(file_name + "MuGravity.txt")
    sigma_gravity = loadtxt(file_name + "SigmaGravity.txt")
    mu_body = loadtxt(file_name + "MuBody.txt")
    sigma_body = loadtxt(file_name + "SigmaBody.txt")

    # Register both components on a fresh model.
    model = GestureModel()
    model.addModel("gravity", mu_gravity, sigma_gravity, th)
    model.addModel("body", mu_body, sigma_body, th)

    if plot == True:
        plotResults(mu_gravity, sigma_gravity, mu_body, sigma_body, file_name)

    return model
# ---- Script configuration --------------------------------------------------
# Model names, number of recorded samples per model, and per-model
# classification thresholds (the three lists are index-parallel).
name_models = ['A','B','S1','S2']
num_samples = [10,14,9,10]
th = [25,20,10,65]
create_models = False  # set True to rebuild the models from raw data
list_files = []
#Create a list of the list of files for each model
print "Defining files"
i = 0
for name in name_models:
    files = []
    for k in range(1,num_samples[i]+1):
        files.append('Models/' + name + '/data/mod('+ str(k) + ').txt')
    list_files.append(files)
    i = i + 1
#Create the models and save the list of files for calculate the weigths
if(create_models == True):
    print "Creating models"
    i = 0
    for model in name_models:
        print list_files[i]
        newModel(model,list_files[i])
        i = i + 1
list_models = []
print "Loading models"
#Load the models
for j in range(len(name_models)):
    #For the moment don't put True is there are more that 2 models in Ubuntu
    gm = loadModel(name_models[j],th[j],False)
    list_models.append(gm)
print "Calculating weigths"
#Used to calculate the weights
v0 = Classifier()
for j in range(len(name_models)):
    print "\nFor model " + name_models[j] + ":"
    w_g, w_b = v0.calculateW(list_files[j],list_models[j])
    list_models[j].addWeight("gravity",w_g)
    list_models[j].addWeight("body",w_b)
print "\n Init classifers"
# One classifier per model.
l_class = []
for j in range(len(name_models)):
    l_class.append(Classifier())
print "Give the model to each classifier"
for j in range(len(name_models)):
    l_class[j].classify(list_models[j])
print "Validation"
# Run every classifier over the same validation file and plot the
# per-classifier possibilities on a single figure.
sfile = "validation/mix3.txt"
import matplotlib.pyplot as plt
fig = plt.figure()
for j in range(len(name_models)):
    poss = l_class[j].validate_from_file(sfile, ',')
    m,n = poss.shape
    x = arange(0,m,1)
    plt.plot(x, poss,'o',label= name_models[j])
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)
plt.savefig("result.png")
print "Finish ..."
``` |
[
{
"content": "```python\n#!/usr/bin/env python\nimport logging\nimport math\nimport os\n\nimport monetdb.sql as db\n\nfrom src.errors import SourceException, ImageStateError\nfrom src.gsmconnectionmanager import GSMConnectionManager\nfrom src.gsmlogger import get_gsm_logger\nfrom src.sqllist import get_sql, get... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\nimport logging\nimport math\nimport os\n\nimport monetdb.sql as db\n\nfrom src.errors import SourceException, ImageStateError\nfrom src.gsmconnectionmanager import GSMConnectionManager\nfrom src.gsmlogger import get_gsm_logger\nfrom src.sqllist imp... | ```python
#!/usr/bin/env python
import logging
import math
import os
import monetdb.sql as db
from src.errors import SourceException, ImageStateError
from src.gsmconnectionmanager import GSMConnectionManager
from src.gsmlogger import get_gsm_logger
from src.sqllist import get_sql, get_svn_version, GLOBALS
from src.grouper import Grouper
from src.updater import run_update
from src.utils import get_pixels, load_parameters
from src.matcher import MatcherF90, MatcherSQL
from src.resolveFlux import FluxResolver
from src.resolveQuad import QuadResolver
from src.resolveSimple import SimpleResolver
class GSMPipeline(object):
    """
    General pipeline class.

    Drives the processing of radio images: connects to the database,
    loads extracted sources, matches them against the running catalog,
    resolves source groups and updates the catalog tables.
    """
    def __init__(self, custom_cm=None, use_monet=None,
                 profile=False,
                 **params):
        """
        @param custom_cm: allows to pass an object to be used as connection
        manager.
        @param use_monet: forwarded to GSMConnectionManager when given.
        @param profile: when True, turn on query profiling and debug
        logging on the connection.
        @param params: extra keyword arguments for get_connection().
        """
        self.log = get_gsm_logger('pipeline', 'pipeline.log')
        self.use_monet = use_monet
        # Build a connection manager unless the caller supplied one.
        if not custom_cm:
            if use_monet != None:
                self.conn_manager = GSMConnectionManager(use_monet=use_monet)
            else:
                self.conn_manager = GSMConnectionManager()
        else:
            self.conn_manager = custom_cm
        try:
            self.conn = self.conn_manager.get_connection(**params)
            if profile:
                self.conn.profile = True
                self.conn.log.setLevel(logging.DEBUG)
            self.conn.commit()
        except db.Error as exc:
            self.log.error("Failed to connect: %s" % exc)
            raise exc
        # Pipeline options live in settings.ini next to this module.
        self.options = load_parameters('%s/settings.ini' %
                                       os.path.dirname(__file__))
        self.log.debug('Pipeline parameters: %s' % self.options)
        self.log.info('Pipeline started.')

    def reopen_connection(self, **params):
        """
        Reopen connection in case it was closed.
        """
        if not self.conn or not self.conn.established():
            try:
                self.conn = self.conn_manager.get_connection(**params)
                self.log.info('Pipeline connection reopened.')
            except db.Error as exc:
                self.log.error("Failed to connect: %s" % exc)
                raise exc

    def read_image(self, source):
        """
        Read image and detections from a given source.

        @raise SourceException: if no source was given.
        """
        if source:
            source.read_and_store_data(self.conn)
        else:
            raise SourceException('No source specified.')

    def run_parset(self, parset):
        """
        Process single parset file.

        @return: the image id that was processed.
        """
        self.conn.start()
        parset.process(self.conn)
        self.parset = parset
        self.process_image(parset.image_id, parset.run_id)
        self.log.info('Parset %s done.' % parset.filename)
        return parset.image_id

    def run_grouper(self):
        """
        Detect/update and store groups of sources for later processing.
        """
        #Update groups by merging overlapping patches.
        cursor = self.conn.get_cursor(get_sql("GroupFinder"))
        grouper = Grouper(cursor.fetchall())
        # NOTE(review): loop condition reads is_completed() — verify the
        # Grouper contract; cycles run *while* this is truthy.
        while grouper.is_completed():
            grouper.one_cycle()
            self.conn.execute_set(get_sql("GroupUpdate",
                                          grouper.group,
                                          ",".join(map(str, grouper.runcatset))))
            grouper.cleanup()
        # Resolve the detected groups (currently only SimpleResolver).
        for resolver in [SimpleResolver]:
            self.run_resolver(resolver)
        self.conn.execute(get_sql("GroupFill"))

    def run_resolver(self, resolve_class):
        """Run the given resolver class over all pending groups."""
        #Running resolver
        resolver = resolve_class(self.conn)
        for group_id in self.conn.get_cursor(get_sql("GroupCycle")):
            if not resolver.run_resolve(group_id[0]):
                #Failed to resolve
                self.log.debug("Group id %s not resolved by %s." %
                               (group_id[0], resolver.__class__.__name__))
                self.conn.log.debug("Group id %s not resolved." % group_id[0])
                self.conn.execute_set(get_sql("GroupUpdate runcat",
                                              group_id[0]))
            else:
                self.log.debug("Group id %s resolved by %s." %
                               (group_id[0], resolver.__class__.__name__))
                self.conn.log.debug("Group id %s resolved." % group_id[0])

    def update_image_pointing(self, image_id):
        """
        Update image pointing to average ra/decl of all sources.
        """
        avg_x, avg_y, avg_z, count = self.conn.exec_return(
            get_sql('Image properties selector', image_id),
            single_column=False)
        # Average the unit vectors, then convert back to RA/Decl.
        avg_x, avg_y, avg_z = avg_x / count, avg_y / count, avg_z / count
        decl = math.asin(avg_z)
        ra = math.atan2(avg_x, avg_y)
        self.conn.execute(get_sql('Image properties updater',
                                  ra, decl, image_id))

    def process_image(self, image_id, run_id=None, sources_loaded=False):
        """
        Process single image.
        @sources_loaded: True if there are records in the extractedsources
        already.
        @raise ImageStateError: if the image was already processed (state 1).
        """
        self.conn.start()
        status, band, stokes, fov_radius, \
            centr_ra, centr_decl, run_loaded, bmaj = \
            self.conn.exec_return("""
select status, band, stokes, fov_radius,
       centr_ra, centr_decl, run_id, bmaj
  from images
 where imageid = %s;""" % image_id, single_column=False)
        if not run_id:
            run_id = run_loaded
        if status == 1:
            raise ImageStateError('Image %s in state 1 (Ok). Cannot process' %
                                  image_id)
        # Make image/run/band/stokes available to all SQL templates.
        GLOBALS.update({'i': image_id, 'r': run_id,
                        'b': band, 's': stokes})
        if not sources_loaded:
            self.conn.execute(get_sql('insert_extractedsources'))
            self.conn.execute(get_sql('insert dummysources'))
        # Association radius: beam major axis when known, option otherwise.
        if bmaj:
            max_assoc = float(bmaj)
        else:
            max_assoc = float(self.options.get('maximum_association_distance'))
        self.log.debug('Using options: %s' % self.options)
        self.log.debug('Final max_assoc_dist %s' % max_assoc)
        #Now do the matching!
        if self.options.get('matcher') == 'F90':
            matcher_class = MatcherF90
        else:
            matcher_class = MatcherSQL
        matcher = matcher_class(self.conn, max_assoc,
                                self.options.get('match_distance'),
                                self.options.get('match_distance_extended'),
                                get_pixels(centr_ra, centr_decl, fov_radius + 0.5))
        matcher.match(image_id)
        self.conn.call_procedure("fill_temp_assoc_kind(%s);" % image_id)
        #Process many-to-many;
        self.run_grouper()
        # Process one-to-one associations;
        self.conn.execute(get_sql('add 1 to 1'))
        #process one-to-many associations;
        self.conn.execute(get_sql('add 1 to N'))
        self.conn.execute_set(get_sql('update flux_fraction'))
        #process many-to-one associations;
        self.conn.execute_set(get_sql('add N to 1'))
        #updating runningcatalog
        run_update(self.conn, 'update runningcatalog')
        run_update(self.conn, 'update runningcatalog extended')
        self.conn.execute(get_sql('update runningcatalog XYZ'))
        #First update, then insert new (!!!)
        run_update(self.conn, 'update runningcatalog_fluxes')
        self.conn.execute(get_sql('insert new bands for point sources'))
        #inserting new sources
        self.conn.execute_set(get_sql('Insert new sources'))
        self.conn.execute_set(get_sql('Join extended'))
        #update image status and save current svn verion.
        self.conn.execute_set(get_sql('Cleanup', get_svn_version()))
        if self.parset.recalculate_pointing:
            self.update_image_pointing(image_id)
        self.conn.commit()
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom .._externals.ppca import PPCA\nfrom .._shared.helpers import get_type\n\n\ndef format_data(x, vectorizer='CountVectorizer',\n semantic='LatentDirichletAllocat... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom .._externals.ppca import PPCA\nfrom .._shared.helpers import get_type\n\n\ndef format_data(x, vectorizer='CountVectorizer',\n semantic='Latent... | ```python
import warnings
import numpy as np
import six
from .._externals.ppca import PPCA
from .._shared.helpers import get_type
def format_data(x, vectorizer='CountVectorizer',
                semantic='LatentDirichletAllocation', corpus='wiki', ppca=True, text_align='hyper'):
    """
    Formats data into a list of numpy arrays

    This function is useful to identify rows of your array that contain missing
    data or nans. The returned indices can be used to remove the rows with
    missing data, or label the missing data points that are interpolated
    using PPCA.

    Parameters
    ----------
    x : numpy array, dataframe, string or (mixed) list
        The data to convert

    vectorizer : str, dict, class or class instance
        The vectorizer to use. Built-in options are 'CountVectorizer' or
        'TfidfVectorizer'. To change default parameters, set to a dictionary
        e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
        http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
        for details. You can also specify your own vectorizer model as a class,
        or class instance. With either option, the class must have a
        fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
        If a class, pass any parameters as a dictionary to vectorizer_params. If
        a class instance, no parameters can be passed.

    semantic : str, dict, class or class instance
        Text model to use to transform text data. Built-in options are
        'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
        parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
        {'n_components' : 10}}. See
        http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
        for details on the two model options. You can also specify your own
        text model as a class, or class instance. With either option, the class
        must have a fit_transform method (see here:
        http://scikit-learn.org/stable/data_transforms.html).
        If a class, pass any parameters as a dictionary to text_params. If
        a class instance, no parameters can be passed.

    corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
        Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
        or 'sotus' and the default semantic and vectorizer models are used, a
        pretrained model will be loaded which can save a lot of time.

    ppca : bool
        Performs PPCA to fill in missing values (default: True)

    text_align : str
        Alignment algorithm to use when both text and numerical data are passed.
        If numerical arrays have the same shape, and the text data contains the
        same number of samples, the text and numerical data are automatically
        aligned to a common space. Example use case: an array of movie frames
        (frames by pixels) and text descriptions of the frame. In this case,
        the movie and text will be automatically aligned to the same space
        (default: hyperalignment).

    Returns
    ----------
    data : list of numpy arrays
        A list of formatted arrays
    """

    # not sure why i needed to import here, but its the only way I could get it to work
    from .df2mat import df2mat
    from .text2mat import text2mat
    from ..datageometry import DataGeometry

    # if x is not a list, make it one
    if type(x) is not list:
        x = [x]

    # a bare list of strings is treated as ONE text sample set, not many
    if all([isinstance(xi, six.string_types) for xi in x]):
        x = [x]

    # check data type for each element in list
    dtypes = list(map(get_type, x))

    # handle text data:
    if any(map(lambda x: x in ['list_str', 'str', 'arr_str'], dtypes)):

        # default text args
        text_args = {
            'vectorizer' : vectorizer,
            'semantic' : semantic,
            'corpus' : corpus
        }

        # filter text data
        text_data = []
        for i,j in zip(x, dtypes):
            if j in ['list_str', 'str', 'arr_str']:
                text_data.append(np.array(i).reshape(-1, 1))

        # convert text to numerical matrices
        text_data = text2mat(text_data, **text_args)

    # replace the text data with transformed data
    processed_x = []
    textidx=0
    for i, dtype in enumerate(dtypes):
        if dtype in ['list_str', 'str', 'arr_str']:
            processed_x.append(text_data[textidx])
            textidx+=1
        elif dtype == 'df':
            processed_x.append(df2mat(x[i]))
        elif dtype == 'geo':
            # recursively format the data held by a DataGeometry object
            text_args = {
                'vectorizer' : vectorizer,
                'semantic' : semantic,
                'corpus' : corpus
            }
            for j in format_data(x[i].get_data(), **text_args):
                processed_x.append(j)
        else:
            processed_x.append(x[i])

    # reshape anything that is 1d
    if any([i.ndim<=1 for i in processed_x]):
        processed_x = [np.reshape(i,(i.shape[0],1)) if i.ndim==1 else i for i in processed_x]

    contains_text = any([dtype in ['list_str', 'str', 'arr_str'] for dtype in dtypes])
    contains_num = any([dtype in ['list_num', 'array', 'df', 'arr_num'] for dtype in dtypes])

    # if there are any nans in any of the lists, use ppca
    if ppca is True:
        if contains_num:
            num_data = []
            for i,j in zip(processed_x, dtypes):
                if j in ['list_num', 'array', 'df', 'arr_num']:
                    num_data.append(i)
            if np.isnan(np.vstack(num_data)).any():
                warnings.warn('Missing data: Inexact solution computed with PPCA (see https://github.com/allentran/pca-magic for details)')
                num_data = fill_missing(num_data)
                # re-interleave imputed numeric arrays with text arrays,
                # preserving the original element order
                x_temp = []
                for dtype in dtypes:
                    if dtype in ['list_str', 'str', 'arr_str']:
                        x_temp.append(text_data.pop(0))
                    elif dtype in ['list_num', 'array', 'df', 'arr_num']:
                        x_temp.append(num_data.pop(0))
                processed_x = x_temp

    # if input data contains both text and numerical data
    if contains_num and contains_text:

        # and if they have the same number of samples
        if np.unique(np.array([i.shape[0] for i, j in zip(processed_x, dtypes)])).shape[0] == 1:

            from .align import align as aligner

            # align the data
            warnings.warn('Numerical and text data with same number of '
                          'samples detected. Aligning data to a common space.')

            processed_x = aligner(processed_x, align=text_align, format_data=False)

    return processed_x
def fill_missing(x):
    """Impute missing values in a list of arrays using PPCA.

    Stacks the arrays, fits probabilistic PCA to interpolate NaNs, and
    splits the transformed data back into the original list structure.
    Rows that were entirely missing are restored to all-NaN, since PPCA
    has no information to reconstruct them.

    Parameters
    ----------
    x : list of numpy arrays sharing the same number of columns

    Returns
    -------
    list of numpy arrays with the same shapes as the input
    """
    # ppca if missing data
    m = PPCA()
    stacked = np.vstack(x)
    m.fit(data=stacked)
    x_pca = m.transform()

    # if the whole row is missing, return nans
    # BUG FIX: the original tested `type(b) == np.nan`, which is always
    # False (np.nan is a float *value*, not a type), so fully-missing rows
    # were never detected. np.isnan is the correct test.
    all_missing = [idx for idx, a in enumerate(stacked) if np.isnan(a).all()]
    for i in all_missing:
        x_pca[i, :] = np.nan

    # get the original lists back
    if len(x) > 1:
        x_split = np.cumsum([i.shape[0] for i in x][:-1])
        return list(np.split(x_pca, x_split, axis=0))
    else:
        return [x_pca]
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n## begin license ##\n#\n# \"Weightless\" is a High Performance Asynchronous Networking Library. See http://weightless.io\n#\n# Copyright (C) 2010-2011 Seek You Too (CQ2) http://www.cq2.nl\n# Copyright (C) 2011-2015, 2020-2021 Seecr (Seek You Too B.V.) https://se... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n## begin license ##\n#\n# \"Weightless\" is a High Performance Asynchronous Networking Library. See http://weightless.io\n#\n# Copyright (C) 2010-2011 Seek You Too (CQ2) http://www.cq2.nl\n# Copyright (C) 2011-2015, 2020-2021 Seecr (Seek You Too ... | ```python
# -*- coding: utf-8 -*-
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2010-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2015, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from seecr.test import SeecrTestCase, CallTrace
from seecr.test.io import stderr_replaced
from socket import SHUT_RDWR
from weightless.core import retval, be, Observable
from weightless.io import reactor
from weightless.io.utils import asProcess, sleep
from weightless.http import SocketPool, EmptySocketPool
#import gc
#gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
class SocketPoolTest(SeecrTestCase):
##
## Get & Put'ing
def testGetOnEmptyPool(self):
    """Getting from an empty pool yields None and never touches the reactor."""
    trace = CallTrace()
    sp = SocketPool(reactor=trace)
    self.assertEqual(None, retval(sp.getPooledSocket(host='x', port=1025)))
    self.assertEqual([], trace.calledMethodNames())
def testPutThenGetThenEmpty(self):
    """A pooled socket is returned once; afterwards the pool is empty again."""
    sp = SocketPool(reactor=CallTrace())
    result = retval(sp.putSocketInPool(host='x', port=1, sock='mock'))
    self.assertEqual(None, result)
    self.assertEqual('mock', retval(sp.getPooledSocket(host='x', port=1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='x', port=1)))
def testPut3GetOnlyYours(self):
    """Sockets are keyed by exact (host, port); other keys yield None."""
    sp = SocketPool(reactor=CallTrace())
    retval(sp.putSocketInPool(host='x', port=1, sock='A'))
    retval(sp.putSocketInPool(host='x', port=2, sock='B'))
    retval(sp.putSocketInPool(host='y', port=1, sock='C'))
    # Unknown host + port
    self.assertEqual(None, retval(sp.getPooledSocket(host='xx', port=1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='', port=1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host=None, port=1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='x', port=0)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='x', port=3)))
    # Retrieved once
    self.assertEqual('A', retval(sp.getPooledSocket(host='x', port=1)))
def testPutNGetLIFO(self):
    """The pool hands out sockets most-recently-put first (LIFO)."""
    sp = SocketPool(reactor=CallTrace())
    retval(sp.putSocketInPool(host='x', port=1, sock='A'))
    retval(sp.putSocketInPool(host='x', port=1, sock='B'))
    retval(sp.putSocketInPool(host='x', port=1, sock='C'))
    self.assertEqual('C', retval(sp.getPooledSocket(host='x', port=1)))
    self.assertEqual('B', retval(sp.getPooledSocket(host='x', port=1)))
    self.assertEqual('A', retval(sp.getPooledSocket(host='x', port=1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='x', port=1)))
def testPutNGet1Put1StillLIFO(self):
    """LIFO order is kept across interleaved puts and gets."""
    sp = SocketPool(reactor=CallTrace())
    retval(sp.putSocketInPool(host='example.org', port=80, sock='A'))
    retval(sp.putSocketInPool(host='example.org', port=80, sock='B'))
    self.assertEqual('B', retval(sp.getPooledSocket(host='example.org', port=80)))
    retval(sp.putSocketInPool(host='example.org', port=80, sock='C'))
    self.assertEqual('C', retval(sp.getPooledSocket(host='example.org', port=80)))
    self.assertEqual('A', retval(sp.getPooledSocket(host='example.org', port=80)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='example.org', port=80)))
def testPutEmptyPut(self):
    """A fully drained (host, port) entry accepts new sockets again."""
    sp = SocketPool(reactor=CallTrace())
    retval(sp.putSocketInPool(host='10.0.0.1', port=60000, sock=0))
    retval(sp.putSocketInPool(host='10.0.0.1', port=60000, sock=1))
    # Drain in LIFO order (1 then 0), then verify emptiness.
    for i in reversed(list(range(2))):
        self.assertEqual(i, retval(sp.getPooledSocket(host='10.0.0.1', port=60000)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='10.0.0.1', port=60000)))
    retval(sp.putSocketInPool(host='10.0.0.1', port=60000, sock=2))
    self.assertEqual(2, retval(sp.getPooledSocket(host='10.0.0.1', port=60000)))
def testEmptySocketPool_put_noErrors(self):
    """EmptySocketPool shuts down and closes any socket put into it."""
    sok = MockSok('s')
    sp = EmptySocketPool()
    result = retval(sp.putSocketInPool(host='whatever', port=-1, sock=sok))
    self.assertEqual(None, result)
    self.assertEqual(['shutdown', 'close'], sok.log.calledMethodNames())
    self.assertEqual(((SHUT_RDWR,), {}), (sok.log.calledMethods[0].args, sok.log.calledMethods[0].kwargs))
    self.assertEqual(((), {}), (sok.log.calledMethods[1].args, sok.log.calledMethods[1].kwargs))
def testEmptySocketPool_put_nonfatalErrors(self):
    """Ordinary exceptions during shutdown are swallowed; close still happens."""
    sok = MockSok('s')
    def shutdown(*a, **kw):
        # Simulate a socket that fails on shutdown().
        raise Exception('xcptn')
    sok.log.methods['shutdown'] = shutdown
    sp = EmptySocketPool()
    result = retval(sp.putSocketInPool(host='secure.example.org', port=9999, sock=sok))
    self.assertEqual(None, result)
    self.assertEqual(['shutdown', 'close'], sok.log.calledMethodNames())
    self.assertEqual(((SHUT_RDWR,), {}), (sok.log.calledMethods[0].args, sok.log.calledMethods[0].kwargs))
    self.assertEqual(((), {}), (sok.log.calledMethods[1].args, sok.log.calledMethods[1].kwargs))
def testEmptySocketPool_put_fatalErrors(self):
    """Fatal exceptions (AssertionError/KeyboardInterrupt/SystemExit) propagate."""
    exceptions = [AssertionError('x'), KeyboardInterrupt('x'), SystemExit('x')]
    def t():
        # One run per fatal exception type; shutdown raises it.
        sok = MockSok('s')
        current_exc = []
        def shutdown(*a, **kw):
            e = exceptions.pop(0)
            current_exc.append(e)
            raise e
        sok.log.methods['shutdown'] = shutdown
        sp = EmptySocketPool()
        try:
            retval(sp.putSocketInPool(host='secure.example.org', port=9999, sock=sok))
        except type(current_exc[0]):
            pass
        else:
            self.fail()
        # close() must NOT have been called after a fatal error.
        self.assertEqual(['shutdown'], sok.log.calledMethodNames())
        self.assertEqual(((SHUT_RDWR,), {}), (sok.log.calledMethods[0].args, sok.log.calledMethods[0].kwargs))
    for i in range(3):
        t()
    self.assertEqual(0, len(exceptions))
def testEmptySocketPool_get(self):
    """EmptySocketPool never returns a pooled socket."""
    sp = EmptySocketPool()
    self.assertEqual(None, retval(sp.getPooledSocket(host='whatever', port=-1)))
    self.assertEqual(None, retval(sp.getPooledSocket(host='secure.example.org', port=1010)))
##
## Poolsize global limit
def testLimitsMustBeKnown(self):
    """Unknown keys in the limits dict raise TypeError at construction."""
    self.assertRaises(TypeError, lambda: SocketPool(reactor='x', limits={'unknown': 'limit'}))
def testLimitSetNotReached(self):
    """Below the totalSize limit, puts and LIFO gets behave as usual."""
    def test():
        sp = SocketPool(reactor=CallTrace(), limits={'totalSize': 3}) # Limits enforced on put, not async.
        def fillAndEmpty():
            yield sp.putSocketInPool(host='h', port=1, sock='s2')
            yield sp.putSocketInPool(host='h', port=1, sock='s1')
            yield sp.putSocketInPool(host='h', port=1, sock='s0')
            for i in range(3):
                self.assertEqual('s{0}'.format(i), (yield sp.getPooledSocket(host='h', port=1)))
        yield fillAndEmpty()
        yield fillAndEmpty()
    asProcess(test())
def testLimitSetReached(self):
    """Exceeding totalSize evicts (shutdown + close) the oldest socket."""
    def test():
        sp = SocketPool(reactor=CallTrace(), limits={'totalSize': 2}) # Limits enforced on put, not async.
        def stillPooled():
            # Drain the pool and report what was still in it.
            wasStillPooled = []
            while True: # do ... while (fromPool is not None)
                fromPool = yield sp.getPooledSocket(host='h', port=1)
                if fromPool:
                    wasStillPooled.append(fromPool)
                if fromPool is None: break
            return wasStillPooled
        s0 = MockSok('s0')
        s1 = MockSok('s1')
        yield sp.putSocketInPool(host='h', port=1, sock=s0)
        yield sp.putSocketInPool(host='h', port=1, sock=s1)
        with stderr_replaced() as err:
            yield sp.putSocketInPool(host='h', port=1, sock=MockSok('s2'))
            self.assertEqual('', err.getvalue(), err.getvalue()) #@@
        wasStillPooled = yield stillPooled()
        # s1 (the middle one) was evicted to make room for s2.
        self.assertEqual(['s2', 's0'], wasStillPooled)
        self.assertEqual(['shutdown', 'close'], s1.log.calledMethodNames())
        shutCall, closeCall = s1.log.calledMethods
        self.assertEqual(((SHUT_RDWR,), {}), (shutCall.args, shutCall.kwargs))
        self.assertEqual(((), {}), (closeCall.args, closeCall.kwargs))
        self.assertEqual([], s0.log.calledMethodNames())
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('s0'))
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('s1'))
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('s2'))
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('s3'))
        wasStillPooled = yield stillPooled()
        self.assertEqual(['s3', 's0'], wasStillPooled)
    asProcess(test())
def testLimitSetReachedWithDifferentDestinations(self):
    """totalSize is enforced across destinations, keeping only two sockets."""
    def test():
        sp = SocketPool(reactor=CallTrace(), limits={'totalSize': 2}) # Limits enforced on put, not async.
        def stillPooled():
            # Drain all three destinations and report remaining sockets.
            wasStillPooled = []
            for destHost, destPort in [('h', 1), ('i', 2), ('j', 3)]:
                while True: # do ... while (fromPool is not None)
                    fromPool = yield sp.getPooledSocket(host=destHost, port=destPort)
                    if fromPool:
                        wasStillPooled.append(fromPool)
                    if fromPool is None: break
            return wasStillPooled
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('sH'))
        yield sp.putSocketInPool(host='i', port=2, sock=MockSok('sI'))
        with stderr_replaced() as err:
            yield sp.putSocketInPool(host='j', port=3, sock=MockSok('sJ'))
            self.assertEqual('', err.getvalue(), err.getvalue())
        wasStillPooled = yield stillPooled()
        self.assertEqual(2, len(wasStillPooled))
        self.assertTrue(set(wasStillPooled).issubset(set(['sH', 'sI', 'sJ'])))
    asProcess(test())
def testLimitDestinationReached(self):
    """destinationSize limits each (host, port) entry independently."""
    def test():
        sp = SocketPool(reactor=CallTrace(), limits={'destinationSize': 2}) # Limits enforced on put, not async.
        def stillPooled():
            # Drain all three destinations and report remaining sockets.
            wasStillPooled = []
            for destHost, destPort in [('h', 1), ('i', 2), ('j', 3)]:
                while True: # do ... while (fromPool is not None)
                    fromPool = yield sp.getPooledSocket(host=destHost, port=destPort)
                    if fromPool:
                        wasStillPooled.append(fromPool)
                    if fromPool is None: break
            return wasStillPooled
        sJ = MockSok('sJ')
        sJ2 = MockSok('sJ2')
        yield sp.putSocketInPool(host='h', port=1, sock=MockSok('sH'))
        yield sp.putSocketInPool(host='i', port=2, sock=MockSok('sI'))
        yield sp.putSocketInPool(host='j', port=3, sock=sJ)
        yield sp.putSocketInPool(host='j', port=3, sock=sJ2)
        with stderr_replaced() as err:
            yield sp.putSocketInPool(host='j', port=3, sock=MockSok('sJ3'))
            self.assertEqual('', err.getvalue(), err.getvalue())
        wasStillPooled = yield stillPooled()
        self.assertEqual(4, len(wasStillPooled))
        # Only ('j', 3) was over its limit; its oldest socket sJ was evicted.
        self.assertEqual(['sH', 'sI', 'sJ3', 'sJ2'], wasStillPooled)
        self.assertEqual(['shutdown', 'close'], sJ.log.calledMethodNames())
    asProcess(test())
##
## unusedTimeout (reactor interaction)
def testUnusedTimeoutSetInitialisesTimer(self):
    """Setting unusedTimeout registers a reactor timer and purges idle sockets."""
    # Whitebox (unusedTimeout -> addTimer)
    mockReactor = CallTrace()
    SocketPool(reactor=mockReactor, unusedTimeout=0.02)
    self.assertEqual(['addTimer'], mockReactor.calledMethodNames())
    self.assertEqual(['seconds', 'callback'], list(mockReactor.calledMethods[0].kwargs.keys()))
    self.assertEqual(0.02, mockReactor.calledMethods[0].kwargs['seconds'])

    # Blackbox
    def test():
        top = be((Observable(),
            (SocketPool(reactor=reactor(), unusedTimeout=0.02),),
        ))
        yield top.any.putSocketInPool(host='x', port=80, sock=MockSok('A'))
        yield top.any.putSocketInPool(host='x', port=80, sock=MockSok('B'))
        yield sleep(seconds=0.001)
        result = yield top.any.getPooledSocket(host='x', port=80)
        self.assertEqual('B', result)
        # After the timeout elapses the remaining socket is purged.
        yield sleep(seconds=0.04)
        result = yield top.any.getPooledSocket(host='x', port=80)
        self.assertEqual(None, result)
    asProcess(test())
def testUnusedTimeoutOnlyPurgesInactiveSocket(self):
    """Sockets that were taken out and put back into the pool must survive
    the next unusedTimeout sweep; untouched sockets get shutdown + close."""
    # Blackbox
    def test():
        sA, sB, sC, s1, s2, s3 = (MockSok(x) for x in ['A', 'B', 'C', 1, 2, 3])
        top = be((Observable(),
            (SocketPool(reactor=reactor(), unusedTimeout=0.025),),
        ))
        # Make sure 1st check all-sockets-ok
        yield sleep(seconds=(0.001))
        # Initial set
        yield top.any.putSocketInPool(host='x', port=80, sock=sA)
        yield top.any.putSocketInPool(host='x', port=80, sock=sB)
        yield top.any.putSocketInPool(host='x', port=80, sock=sC)
        yield top.any.putSocketInPool(host='example.org', port=8080, sock=s1)
        yield top.any.putSocketInPool(host='example.org', port=8080, sock=s2)
        yield top.any.putSocketInPool(host='example.org', port=8080, sock=s3)
        self.assertEqual([], s2.log.calledMethodNames())  # sample
        # Pass time, no timeout - 1st check always all-sockets-ok
        yield sleep(seconds=(0.025 + 0.022))  # +/- 0.003 until next mostly-fatal check
        self.assertEqual([], s2.log.calledMethodNames())  # sample
        # Use some, put some back
        _sockC = yield top.any.getPooledSocket(host='x', port=80)
        _sockB = yield top.any.getPooledSocket(host='x', port=80)
        _sock3 = yield top.any.getPooledSocket(host='example.org', port=8080)
        self.assertEqual([sC, sB, s3], [_sockC, _sockB, _sock3])
        self.assertEqual([], sC.log.calledMethodNames())
        self.assertEqual([], sB.log.calledMethodNames())
        self.assertEqual([], s3.log.calledMethodNames())
        yield top.any.putSocketInPool(host='x', port=80, sock=sC)
        yield top.any.putSocketInPool(host='example.org', port=8080, sock=s3)
        yield sleep(seconds=0.015)  # 0.025 - (0.015 - 0.003) = 0.013 until all-fatal check
        # Drain both pools; only the re-pooled sockets (sC, s3) should remain.
        inPool = []
        while True:
            result = yield top.any.getPooledSocket(host='x', port=80)
            if result == None:
                break
            inPool.append(result)
        while True:
            result = yield top.any.getPooledSocket(host='example.org', port=8080)
            if result == None:
                break
            inPool.append(result)
        self.assertEqual([sC, s3], inPool)
        self.assertEqual([], sC.log.calledMethodNames())
        self.assertEqual([], s3.log.calledMethodNames())
        # Purged sockets get an orderly shutdown(SHUT_RDWR) followed by close().
        self.assertEqual(['shutdown', 'close'], s1.log.calledMethodNames())  # sample
        shutdown, close = s1.log.calledMethods
        self.assertEqual(((SHUT_RDWR,), {}), (shutdown.args, shutdown.kwargs))
        self.assertEqual(((), {}), (close.args, close.kwargs))
    asProcess(test())
class MockSok(object):
    """Stand-in socket; close/shutdown calls are recorded on a CallTrace log."""

    def __init__(self, id):
        self._id = id
        self.log = CallTrace()
        # Expose the traced methods under the socket's own attribute names.
        self.shutdown = self.log.shutdown
        self.close = self.log.close

    def __eq__(self, other):
        # Equal to another MockSok with the same id, or to the bare id itself.
        return getattr(other, '_id', other) == self._id

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self._id)

    def __str__(self):
        return self._id

    def __repr__(self):
        return '{0}(id={1})'.format(type(self).__name__, self._id)
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport warnings\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, Group, Permission, AnonymousUser\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase\n\n\nclass BackendTest(TestCase):\... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport warnings\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User, Group, Permission, AnonymousUser\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase\n\n\nclass Backend... | ```python
import warnings
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
class BackendTest(TestCase):
    """Tests for the default ModelBackend: superuser shortcuts, custom
    Permission objects, group permissions and (unsupported) object perms."""

    backend = 'django.contrib.auth.backends.ModelBackend'

    def setUp(self):
        # Swap in just the backend under test; restored in tearDown.
        self.curr_auth = settings.AUTHENTICATION_BACKENDS
        settings.AUTHENTICATION_BACKENDS = (self.backend,)
        User.objects.create_user('test', 'test@example.com', 'test')

    def tearDown(self):
        settings.AUTHENTICATION_BACKENDS = self.curr_auth

    def test_has_perm(self):
        user = User.objects.get(username='test')
        self.assertEqual(user.has_perm('auth.test'), False)
        # is_staff alone grants nothing...
        user.is_staff = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)
        # ...but an active superuser passes every permission check.
        user.is_superuser = True
        user.save()
        self.assertEqual(user.has_perm('auth.test'), True)
        user.is_staff = False
        user.is_superuser = False
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)
        # An inactive superuser has no permissions.
        user.is_staff = True
        user.is_superuser = True
        user.is_active = False
        user.save()
        self.assertEqual(user.has_perm('auth.test'), False)

    def test_custom_perms(self):
        user = User.objects.get(username='test')
        content_type=ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        user.user_permissions.add(perm)
        user.save()

        # reloading user to purge the _perm_cache
        user = User.objects.get(username='test')
        self.assertEqual(user.get_all_permissions() == set([u'auth.test']), True)
        self.assertEqual(user.get_group_permissions(), set([]))
        self.assertEqual(user.has_module_perms('Group'), False)
        self.assertEqual(user.has_module_perms('auth'), True)
        perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
        user.user_permissions.add(perm)
        user.save()
        perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
        user.user_permissions.add(perm)
        user.save()
        user = User.objects.get(username='test')
        self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3']))
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)

        # Permissions granted through a group are merged into the user's set.
        perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
        group = Group.objects.create(name='test_group')
        group.permissions.add(perm)
        group.save()
        user.groups.add(group)
        user = User.objects.get(username='test')
        exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group'])
        self.assertEqual(user.get_all_permissions(), exp)
        self.assertEqual(user.get_group_permissions(), set([u'auth.test_group']))
        self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)

        # Anonymous users get nothing from ModelBackend.
        user = AnonymousUser()
        self.assertEqual(user.has_perm('test'), False)
        self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)

    def test_has_no_object_perm(self):
        """Regressiontest for #12462"""
        user = User.objects.get(username='test')
        content_type=ContentType.objects.get_for_model(Group)
        perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
        user.user_permissions.add(perm)
        user.save()

        # ModelBackend ignores the obj argument: object-level checks fail
        # even though the plain permission check passes.
        self.assertEqual(user.has_perm('auth.test', 'object'), False)
        self.assertEqual(user.get_all_permissions('object'), set([]))
        self.assertEqual(user.has_perm('auth.test'), True)
        self.assertEqual(user.get_all_permissions(), set(['auth.test']))
class TestObj(object):
    """Marker type: the row-level backends below grant object permissions
    only on instances of this class."""
    pass
class SimpleRowlevelBackend(object):
    """Toy auth backend granting permissions only on TestObj instances (row level)."""

    supports_object_permissions = True

    # Anonymous-user support is toggled by subclasses via the
    # 'supports_anonymous_user' attribute.

    def has_perm(self, user, perm, obj=None):
        # Row-level only: without an object we have no opinion (None).
        if not obj:
            return
        if isinstance(obj, TestObj):
            if user.username == 'test2':
                return True
            if perm == 'anon' and user.is_anonymous():
                # not reached due to supports_anonymous_user = False
                return True
        return False

    def has_module_perms(self, user, app_label):
        return "app1" == app_label

    def get_all_permissions(self, user, obj=None):
        if not obj:
            # Row-level only: nothing to report without an object.
            return []
        if not isinstance(obj, TestObj):
            return ['none']
        if user.is_anonymous():
            return ['anon']
        return ['simple', 'advanced'] if user.username == 'test2' else ['simple']

    def get_group_permissions(self, user, obj=None):
        if not obj:
            # Row-level only.
            return
        if not isinstance(obj, TestObj):
            return ['none']
        group_names = [group.name for group in user.groups.all()]
        return ['group_perm'] if 'test_group' in group_names else ['none']
class RowlevelBackendTest(TestCase):
    """
    Tests for auth backend that supports object level permissions
    """
    backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend'

    def setUp(self):
        # Append (not replace) the row-level backend so the default one still runs.
        self.curr_auth = settings.AUTHENTICATION_BACKENDS
        settings.AUTHENTICATION_BACKENDS = self.curr_auth + (self.backend,)
        self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
        self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test')
        self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test')
        # Silence deprecation warnings raised for the simple backend's signature.
        warnings.filterwarnings('ignore', category=DeprecationWarning,
                                module='django.contrib.auth')

    def tearDown(self):
        settings.AUTHENTICATION_BACKENDS = self.curr_auth
        warnings.resetwarnings()
        warnings.simplefilter('ignore', PendingDeprecationWarning)

    def test_has_perm(self):
        # SimpleRowlevelBackend only grants object perms to username 'test2'.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
        self.assertEqual(self.user2.has_perm('perm'), False)
        self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
        self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
        self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)

    def test_get_all_permissions(self):
        self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
        self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
        self.assertEqual(self.user2.get_all_permissions(), set([]))

    def test_get_group_permissions(self):
        content_type=ContentType.objects.get_for_model(Group)
        group = Group.objects.create(name='test_group')
        self.user3.groups.add(group)
        self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackend(SimpleRowlevelBackend):
    """Row-level backend that also answers permission checks for AnonymousUser."""
    supports_anonymous_user = True
class NoAnonymousUserBackend(SimpleRowlevelBackend):
    """Row-level backend that declines permission checks for AnonymousUser."""
    supports_anonymous_user = False
class AnonymousUserBackendTest(TestCase):
    """
    Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True
    """
    backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend'

    def setUp(self):
        self.curr_auth = settings.AUTHENTICATION_BACKENDS
        settings.AUTHENTICATION_BACKENDS = (self.backend,)
        self.user1 = AnonymousUser()

    def tearDown(self):
        settings.AUTHENTICATION_BACKENDS = self.curr_auth

    def test_has_perm(self):
        # The backend grants anonymous users only the 'anon' perm on TestObj.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user1.has_perm('anon', TestObj()), True)

    def test_has_perms(self):
        self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
        self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)

    def test_has_module_perms(self):
        self.assertEqual(self.user1.has_module_perms("app1"), True)
        self.assertEqual(self.user1.has_module_perms("app2"), False)

    def test_get_all_permissions(self):
        self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
class NoAnonymousUserBackendTest(TestCase):
    """
    Tests that AnonymousUser does not delegate to backend if it has 'supports_anonymous_user' = False
    """
    backend = 'django.contrib.auth.tests.auth_backends.NoAnonymousUserBackend'

    def setUp(self):
        # Append the backend: the default one stays first in line.
        self.curr_auth = settings.AUTHENTICATION_BACKENDS
        settings.AUTHENTICATION_BACKENDS = self.curr_auth + (self.backend,)
        self.user1 = AnonymousUser()

    def tearDown(self):
        settings.AUTHENTICATION_BACKENDS = self.curr_auth

    def test_has_perm(self):
        # Nothing is delegated, so every check comes back False/empty.
        self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
        self.assertEqual(self.user1.has_perm('anon', TestObj()), False)

    def test_has_perms(self):
        self.assertEqual(self.user1.has_perms(['anon'], TestObj()), False)

    def test_has_module_perms(self):
        self.assertEqual(self.user1.has_module_perms("app1"), False)
        self.assertEqual(self.user1.has_module_perms("app2"), False)

    def test_get_all_permissions(self):
        self.assertEqual(self.user1.get_all_permissions(TestObj()), set())
``` |
[
{
"content": "Here is a code snippet:\n```python\n# -*- coding: utf-8 -*-\nfrom pybythec import utils\nfrom pybythec.utils import f\nfrom pybythec.utils import PybythecError\nfrom pybythec.BuildStatus import BuildStatus\nfrom pybythec.BuildElements import BuildElements\n\nimport os\nimport sys\nimport time\nfro... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom pybythec import utils\nfrom pybythec.utils import f\nfrom pybythec.utils import PybythecError\nfrom pybythec.BuildStatus import BuildStatus\nfrom pybythec.BuildElements import BuildElements\n\nimport os\nimport sys\n... | ```python
# -*- coding: utf-8 -*-
from pybythec import utils
from pybythec.utils import f
from pybythec.utils import PybythecError
from pybythec.BuildStatus import BuildStatus
from pybythec.BuildElements import BuildElements
import os
import sys
import time
from threading import Thread
log = utils.Logger('pybythec')
__author__ = 'glowtree'
__email__ = 'tom@glowtree.com'
__version__ = '0.9.61'
def getBuildElements(osType = None,
                     compiler = None,
                     buildType = None,
                     binaryFormat = None,
                     projConfigPath = None,
                     globalConfigPath = None,
                     projConfig = None,
                     globalConfig = None,
                     currentBuild = None,
                     libDir = None):
  '''
  Passthrough to the BuildElements constructor that catches and reports
  exceptions; returns the new BuildElements, or None when construction fails.
  '''
  try:
    return BuildElements(
        osType = osType,
        compiler = compiler,
        buildType = buildType,
        binaryFormat = binaryFormat,
        projConfig = projConfig,
        projConfigPath = projConfigPath,
        globalConfig = globalConfig,
        globalConfigPath = globalConfigPath,
        currentBuild = currentBuild,
        libDir = libDir)
  except PybythecError as e:
    log.error(e)
  except Exception as e:
    log.error('unknown exception: {0}', e)
  return None
def build(be = None, builds = None):
  '''
  Configure and run one build per entry.
  be: BuildElements object (created on demand when omitted)
  builds: list of build overrides (defaults to be.builds)
  '''
  if not be:
    be = getBuildElements()
    if not be:
      return
  _runPreScript(be)
  selectedBuilds = builds if builds else be.builds
  if type(selectedBuilds) is not list:
    selectedBuilds = [selectedBuilds]
  for currentBuild in selectedBuilds:
    try:
      be.configBuild(currentBuild = currentBuild)
    except PybythecError as e:
      log.error(e)
    except Exception as e:
      log.error('unknown exception: {0}', e)
    else:
      # only build configurations that configured cleanly
      _build(be)
def _build(be):
  '''
  Does the dirty work of compiling and linking based on the state setup in the
  BuildElements object be.
  Returns True on success / up-to-date / locked, False on any failure.
  '''
  threading = True  # TODO: perhaps this could be an function argument
  buildStatus = BuildStatus(be.targetFilename, be.buildPath)

  # lock - early return
  if be.locked and os.path.exists(be.targetInstallPath):
    buildStatus.writeInfo('locked', '{0} is locked', be.targetName)
    return True

  startTime = time.time()
  log.info('building ' + be.infoStr)

  buildingLib = False
  if be.libDir:
    buildingLib = True

  if not os.path.exists(be.installPath):
    utils.createDirs(be.installPath)

  if not os.path.exists(be.buildPath):
    os.makedirs(be.buildPath)

  incPathList = []
  for incPath in be.incPaths:
    if os.path.exists(incPath):
      incPathList += ['-I', incPath]
    else:
      log.warning('incPath {0} doesn\'t exist', incPath)

  for extIncPath in be.extIncPaths:  # external include libs (for cases where 3rd party header includes are using "" instead of <> ie Unreal)
    if os.path.exists(extIncPath):  # BUGFIX: previously tested incPath, the leftover loop variable from above
      incPathList += ['-I', extIncPath]
    else:
      log.warning('extIncPath {0} doesn\'t exist', extIncPath)

  definesList = []
  for define in be.defines:
    definesList += ['-D', define]

  #
  # qt moc file compilation, TODO: make this another compiler option, along with asm
  #
  mocPaths = []
  for qtClass in be.qtClasses:
    found = False
    mocPath = f('{0}/moc_{1}.cpp', be.buildPath, qtClass)
    qtClassHeader = qtClass + '.h'
    for incPath in be.incPaths:  # find the header file, # TODO: should there be a separate list of headers ie be.mocIncPaths?
      includePath = incPath + '/' + qtClassHeader
      if not os.path.exists(includePath):
        continue
      # run moc when the generated file is missing or older than its header
      if os.path.exists(mocPath) and float(os.stat(mocPath).st_mtime) < float(os.stat(includePath).st_mtime) or not os.path.exists(mocPath):
        buildStatus.description = 'qt moc: ' + utils.runCmd(['moc'] + definesList + [includePath, '-o', mocPath])
        if not os.path.exists(mocPath):
          buildStatus.writeError(buildStatus.description)
          return False
      mocPaths.append(mocPath)
      found = True
    if not found:
      buildStatus.writeError('can\'t find {0} for qt moc compilation', qtClassHeader)
      return False

  for mocPath in mocPaths:
    be.sources.append(mocPath)

  buildStatusDeps = []  # the build status for each dependency: objs and libs
  threads = []
  i = 0

  #
  # compile
  #
  objPaths = []
  cmd = [be.compilerCmd, be.objFlag] + incPathList + definesList + be.flags
  if threading:
    for source in be.sources:
      buildStatusDep = BuildStatus(source)
      buildStatusDeps.append(buildStatusDep)
      thread = Thread(None, target = _compileSrc, args = (be, cmd, source, objPaths, buildStatusDep))
      thread.start()
      threads.append(thread)
      i += 1
  else:
    for source in be.sources:
      buildStatusDep = BuildStatus(source)
      buildStatusDeps.append(buildStatusDep)
      _compileSrc(be, cmd, source, objPaths, buildStatusDep)
      i += 1

  #
  # build library dependencies
  #
  libCmds = []
  libsBuilding = []
  if be.binaryType == 'exe' or be.binaryType == 'plugin':
    for lib in be.libs:
      libName = lib
      if be.compiler.startswith('msvc'):
        libCmds += [libName + be.staticExt]  # you need to link against the .lib stub file even if it's ultimately a .dll that gets linked
      else:
        libCmds += [be.libFlag, libName]

      # check if the lib has a directory for building
      if threading:
        for libSrcDir in be.libSrcPaths:
          libSrcDir = os.path.join(libSrcDir, lib)
          if os.path.exists(libSrcDir):
            libsBuilding.append(lib)
            buildStatusDep = BuildStatus(lib)
            buildStatusDeps.append(buildStatusDep)
            thread = Thread(None, target = _buildLib, args = (be, libSrcDir, buildStatusDep))
            thread.start()
            threads.append(thread)
            i += 1
            break
      else:
        for libSrcPath in be.libSrcPaths:
          if not os.path.exists(libSrcPath):  # BUGFIX: was the string literal 'libSrcPath', which never exists
            log.warning('libSrcPath {0} doesn\'t exist', libSrcPath)
            continue
          libSrcPath = os.path.join(libSrcPath, lib)
          if os.path.exists(libSrcPath):
            libsBuilding.append(lib)
            buildStatusDep = BuildStatus(lib)
            buildStatusDeps.append(buildStatusDep)
            _buildLib(be, libSrcPath, buildStatusDep)  # BUGFIX: was libSrcDir, which is unbound in this branch
            i += 1
            break

  # wait for all the threads before checking the results
  for thread in threads:
    thread.join()

  allUpToDate = True
  for buildStatusDep in buildStatusDeps:
    if buildStatusDep.status == 'failed':
      # NOTE: changed from buildStatusDep.description.encode('ascii', 'ignore') which fixed issue on macOs
      # BUGFIX: format string was missing the {3} placeholder for the elapsed time argument
      buildStatus.writeError('{0} failed because {1} failed because...\n\n{2}\n...determined in {3} seconds\n\n', be.infoStr, buildStatusDep.name,
                             buildStatusDep.description, str(int(time.time() - startTime)))
      return False
    elif buildStatusDep.status == 'built':
      allUpToDate = False

  # revise the library paths
  for i in range(len(be.libPaths)):
    revisedLibPath = be.libPaths[i] + be.binaryRelPath
    if os.path.exists(revisedLibPath):
      be.libPaths[i] = revisedLibPath
    else:  # try without the currentBuild leaf dir, ie 3rd party libs likely won't have them
      revisedLibPath = f('{0}/{1}/{2}/{3}/{4}', be.libPaths[i], be.osType, be.buildType, be.compilerVersion, be.binaryFormat)
      if os.path.exists(revisedLibPath):
        be.libPaths[i] = revisedLibPath

  # check for multiple instances of a lib: link errors due to linking to the wrong version of a lib can be a nightmare to debug
  # if you don't suspect it's the wrong version
  libsFound = {}  # lib name, array of paths where it was found
  for p in be.libPaths:
    for lib in be.libs:
      if be.compiler.startswith('msvc'):
        staticPath = f('{0}/{1}{2}', p, lib, be.staticExt)
        dynamicPath = f('{0}/{1}{2}', p, lib, be.dynamicExt)
      else:
        staticPath = f('{0}/lib{1}{2}', p, lib, be.staticExt)
        dynamicPath = f('{0}/lib{1}{2}', p, lib, be.dynamicExt)
      if os.path.exists(staticPath) or os.path.exists(dynamicPath):
        if lib in libsFound:
          libsFound[lib].append(p)
        else:
          libsFound[lib] = [p]
  for l in libsFound:
    libPaths = libsFound[l]
    if len(libPaths) > 1:
      log.w('lib {0} found in more than one place: {1}\n', l, libPaths)

  #
  # linking
  #
  linkCmd = []
  if allUpToDate and os.path.exists(be.targetInstallPath):
    buildStatus.writeInfo('up to date', '{0} is up to date, determined in {1} seconds\n', be.infoStr, str(int(time.time() - startTime)))
    if not buildingLib:
      _runPostScript(be)
    return True

  # microsoft's compiler / linker can only handle so many characters on the command line
  msvcLinkCmdFilePath = be.buildPath + '/linkCmd'
  if be.compiler.startswith('msvc'):
    msvcLinkCmd = f('{0}"{1}" "{2}" {3}', be.targetFlag, be.targetInstallPath, '" "'.join(objPaths), ' '.join(libCmds))
    msvcLinkCmdFp = open(msvcLinkCmdFilePath, 'w')
    msvcLinkCmdFp.write(msvcLinkCmd)
    msvcLinkCmdFp.close()
    linkCmd += [be.linker, '@' + msvcLinkCmdFilePath]
    if be.showLinkerCmds:
      log.info('\nmsvcLinkCmd: {0}\n', msvcLinkCmd)
  else:
    linkCmd += [be.linker, be.targetFlag, be.targetInstallPath] + objPaths + libCmds

  if be.binaryType != 'static':  # TODO: is this the case for msvc?
    linkCmd += be.linkFlags

  if be.binaryType == 'exe' or be.binaryType == 'plugin' or (be.compilerRoot == 'msvc' and be.binaryType == 'dynamic'):
    for libPath in be.libPaths:
      if not os.path.exists(libPath):
        log.warning('libPath {0} doesn\'t exist', libPath)
        continue
      if be.compiler.startswith('msvc'):
        linkCmd += [be.libPathFlag + os.path.normpath(libPath)]
      else:
        linkCmd += [be.libPathFlag, os.path.normpath(libPath)]

  # get the timestamp of the existing target if it exists
  linked = False
  targetExisted = False
  oldTargetTimeStamp = None
  if os.path.exists(be.targetInstallPath):
    oldTargetTimeStamp = float(os.stat(be.targetInstallPath).st_mtime)
    targetExisted = True

  if be.showLinkerCmds:
    log.info('\n{0}\n', ' '.join(linkCmd))

  buildStatus.description = utils.runCmd(linkCmd)

  # success is judged by the target's mtime advancing (or the file newly appearing)
  if os.path.exists(be.targetInstallPath):
    if targetExisted:
      if float(os.stat(be.targetInstallPath).st_mtime) > oldTargetTimeStamp:
        linked = True
    else:
      linked = True

  if linked:
    log.info('linked ' + be.infoStr)
  else:
    buildStatus.writeError('linking failed because {0}', buildStatus.description)
    return False

  # copy dynamic library dependencies to the install path
  if be.copyDynamicLibs:
    if be.binaryType == 'exe' or be.binaryType == 'plugin':
      for lib in be.libs:
        for libPath in be.libPaths:
          dynamicPath = libPath + '/'
          if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
            dynamicPath += 'lib'
          dynamicPath += lib + be.dynamicExt
          if os.path.exists(dynamicPath):
            utils.copyfile(dynamicPath, be.installPath)

  buildStatus.writeInfo('built', '{0} built {1}\ncompleted in {2} seconds\n', be.infoStr, be.targetInstallPath, str(int(time.time() - startTime)))

  sys.stdout.flush()

  # run a post-build script if it exists
  if not buildingLib:
    _runPostScript(be)

  return True
#
# private functions
#
def _compileSrc(be, compileCmd, source, objPaths, buildStatus):
  '''
  Compile a single source file to its object file (may run on a worker thread).
  be (in): BuildElements object
  compileCmd (in): the compile command so far
  source (in): the c or cpp source file to compile (every source file gets it's own object file)
  objPaths (out): list of all object paths that will be passed to the linker
  buildStatus (out): build status for this particular compile, defaults to failed
  '''
  if not os.path.exists(source):
    buildStatus.writeError('{0} is missing, exiting build', source)
    return
  objFile = os.path.basename(source)
  objFile = objFile.replace(os.path.splitext(source)[1], be.objExt)
  objPath = os.path.join(be.buildPath, objFile)
  objPaths.append(objPath)
  # check if it's up to date
  objExisted = os.path.exists(objPath)
  if objExisted:
    objTimestamp = float(os.stat(objPath).st_mtime)
    # up to date only when the object is newer than both the config files
    # and everything the source depends on
    if objTimestamp > be.latestConfigTimestamp and not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
      buildStatus.status = 'up to date'
      return
    # if not utils.sourceNeedsBuilding(be.incPaths, source, objTimestamp):
    #   buildStatus.status = 'up to date'
    #   return
  # Microsoft Visual C has to have the objPathFlag cuddled up directly next to the objPath - no space in between them (grrr)
  if be.compiler.startswith('msvc'):
    cmd = compileCmd + [source, be.objPathFlag + objPath]
  else:
    cmd = compileCmd + [source, be.objPathFlag, objPath]
  if be.showCompilerCmds:
    log.info('\n' + ' '.join(cmd) + '\n')
  buildStatus.description = utils.runCmd(cmd)
  # success is judged by the object file's mtime advancing (or newly appearing)
  if os.path.exists(objPath):
    if objExisted:
      if float(os.stat(objPath).st_mtime) > objTimestamp:
        buildStatus.status = 'built'
    else:
      buildStatus.status = 'built'
  if buildStatus.status == 'built':
    buildStatus.description = 'compiled ' + os.path.basename(source)
  else:
    log.error('{0} failed to build', objPath)
def _buildLib(be, libSrcDir, buildStatus):
  '''
  Build a dependency library found in libSrcDir using the parent build's
  configuration, then load that build's result into buildStatus.
  (Started as a worker thread from _build.)
  '''
  libBe = getBuildElements(
      osType = be.osType,
      compiler = be.compiler,
      buildType = be.buildType,
      binaryFormat = be.binaryFormat,
      projConfig = be.projConfig,
      globalConfig = be.globalConfig,
      currentBuild = be.currentBuild,
      libDir = libSrcDir)
  if not libBe:
    return
  build(libBe)
  # read the build status
  buildStatus.readFromFile(libSrcDir, be.buildDir, be.binaryRelPath)
def clean(be = None, builds = None):
  '''
  Clean the current project, once per configured build.
  be: BuildElements object (created on demand when omitted)
  builds: list of build overrides (defaults to be.builds)
  '''
  if not be:
    be = getBuildElements()
    if not be:
      return
  buildsRef = builds
  if not buildsRef:
    buildsRef = be.builds
  if type(buildsRef) is not list:
    buildsRef = [buildsRef]
  for build in buildsRef:
    try:
      be.configBuild(currentBuild = build)
    except PybythecError as e:
      log.error(e)
      continue  # BUGFIX: was return - one bad config shouldn't abort cleaning the remaining builds (matches build() / cleanAll())
    except Exception as e:
      log.error('unknown exception: {0}', e)
      continue
    _clean(be)
def _clean(be = None):
  '''
  cleans the current project
  be (in): BuildElements object
  '''
  # remove any dynamic libs that are sitting next to the exe
  if os.path.exists(be.installPath) and (be.binaryType == 'exe' or be.binaryType == 'plugin'):
    for fl in os.listdir(be.installPath):
      libName, ext = os.path.splitext(fl)
      if ext == be.dynamicExt:
        if be.compilerRoot == 'gcc' or be.compilerRoot == 'clang':
          # BUGFIX: lstrip('lib') strips any leading 'l'/'i'/'b' characters, not
          # the 'lib' prefix (eg 'libbbar' -> 'ar'); strip exactly the prefix
          if libName.startswith('lib'):
            libName = libName[3:]
        for lib in be.libs:
          if lib == libName:
            p = be.installPath + '/' + fl
            try:
              os.remove(p)
            except Exception:
              log.warning('failed to remove {0}', p)
      elif ext == '.exp' or ext == '.ilk' or ext == '.lib' or ext == '.pdb':  # msvc files
        p = be.installPath + '/' + fl
        try:
          os.remove(p)
        except Exception:
          log.warning('failed to remove {0}', p)
  if not os.path.exists(be.buildPath):  # canary in the coal mine
    log.info(be.infoStr + ' already clean')
    return True
  # delete the build artifacts, then the (now empty) build directory itself
  dirCleared = True
  for fl in os.listdir(be.buildPath):
    p = be.buildPath + '/' + fl
    try:
      os.remove(p)
    except Exception:
      dirCleared = False
      log.warning('failed to remove {0}', p)
  if dirCleared:
    os.removedirs(be.buildPath)
  if os.path.exists(be.targetInstallPath):
    os.remove(be.targetInstallPath)
  # msvc dlls leave .exp / .lib stubs next to the target
  target, ext = os.path.splitext(be.targetInstallPath)
  if ext == '.dll':
    try:
      os.remove(target + '.exp')
      os.remove(target + '.lib')
    except Exception:
      pass
  try:
    os.removedirs(be.installPath)
  except Exception:
    pass
  log.info(be.infoStr + ' all clean')
  return True
def cleanAll(be = None, builds = None):
  '''
  cleans both the current project and also the dependencies
  '''
  if not be:
    be = getBuildElements()
    if not be:
      return
  buildsRef = builds
  if not buildsRef:
    buildsRef = be.builds
  if type(buildsRef) is not list:
    buildsRef = [buildsRef]
  for build in buildsRef:
    try:
      be.configBuild(currentBuild = build)
    except PybythecError as e:
      log.error(e)
      continue
    except Exception as e:
      log.error('unknown exception: {0}', e)
      continue
    _clean(be)
    # clean library dependencies
    for lib in be.libs:
      for libSrcPath in be.libSrcPaths:
        libPath = os.path.join(libSrcPath, lib)
        if os.path.exists(libPath):
          libBe = getBuildElements(
              osType = be.osType,
              compiler = be.compiler,
              buildType = be.buildType,
              binaryFormat = be.binaryFormat,
              projConfig = be.projConfig,
              globalConfig = be.globalConfig,
              currentBuild = be.currentBuild,
              libDir = libPath)
          if not libBe:
            continue  # BUGFIX: was return - one bad lib config shouldn't abort cleaning the remaining libs
          clean(libBe)  # builds = build)
def _runPreScript(be):
  '''
  Look for a pre-build script (pybythecPre.py, falling back to the hidden
  .pybythecPre.py) next to the project / lib and run its run(be) hook.
  '''
  pathRoot = be.libDir if be.libDir else '.'
  for scriptName in ('pybythecPre.py', '.pybythecPre.py'):
    scriptPath = pathRoot + '/' + scriptName
    if os.path.exists(scriptPath):
      import imp
      imp.load_source('', scriptPath).run(be)
      break
def _runPostScript(be):
  '''
  Look for a post-build script (pybythecPost.py, falling back to the hidden
  .pybythecPost.py) next to the project / lib and run its run(be) hook.
  '''
  pathRoot = be.libDir if be.libDir else '.'
  for scriptName in ('pybythecPost.py', '.pybythecPost.py'):
    scriptPath = pathRoot + '/' + scriptName
    if os.path.exists(scriptPath):
      import imp
      imp.load_source('', scriptPath).run(be)
      break
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport webnotes\nfrom webnotes import _, msgprint\nfrom webnotes.utils import flt\nimport time\nfrom accoun... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport webnotes\nfrom webnotes import _, msgprint\nfrom webnotes.utils import flt\nimport t... | ```python
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, msgprint
from webnotes.utils import flt
import time
from accounts.utils import get_fiscal_year
from controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
    """Report entry point: build (columns, data) for budget variance per
    cost center / account, grouped into the periods selected in filters.
    filters must carry fiscal_year, period and company (validated in get_columns).
    """
    if not filters: filters = {}
    columns = get_columns(filters)
    period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
    cam_map = get_costcenter_account_month_map(filters)
    data = []
    for cost_center, cost_center_items in cam_map.items():
        for account, monthwise_data in cost_center_items.items():
            row = [cost_center, account]
            totals = [0, 0, 0]  # running target / actual / variance across all periods
            for relevant_months in period_month_ranges:
                period_data = [0, 0, 0]  # target / actual / variance for this period
                for month in relevant_months:
                    month_data = monthwise_data.get(month, {})
                    for i, fieldname in enumerate(["target", "actual", "variance"]):
                        value = flt(month_data.get(fieldname))
                        period_data[i] += value
                        totals[i] += value
                # variance is recomputed as target - actual, overwriting the summed field
                period_data[2] = period_data[0] - period_data[1]
                row += period_data
            totals[2] = totals[0] - totals[1]
            row += totals
            data.append(row)
    # sort by cost center, then account
    return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
    """Validate required filters and build the column list: two link columns,
    then Target/Actual/Variance float columns per period, then three grand
    totals. Raises (via msgprint) when a required filter is missing."""
    for fieldname in ["fiscal_year", "period", "company"]:
        if not filters.get(fieldname):
            label = (" ".join(fieldname.split("_"))).title()
            msgprint(_("Please specify") + ": " + label,
                raise_exception=True)
    columns = ["Cost Center:Link/Cost Center:120", "Account:Link/Account:120"]
    # Monthly reports label each month; coarser periods label the month span.
    group_months = False if filters["period"] == "Monthly" else True
    for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
        for label in ["Target (%s)", "Actual (%s)", "Variance (%s)"]:
            if group_months:
                label = label % (from_date.strftime("%b") + " - " + to_date.strftime("%b"))
            else:
                label = label % from_date.strftime("%b")
            columns.append(label+":Float:120")
    return columns + ["Total Target:Float:120", "Total Actual:Float:120",
        "Total Variance:Float:120"]
#Get cost center & target details
def get_costcenter_target_details(filters):
    """Fetch each cost center's budget rows (account + allocated amount)
    for the given fiscal year and company."""
    # NOTE(review): the "% ('%s', '%s')" substitution is a no-op that re-inserts
    # the same placeholders; the real values are bound by webnotes.conn.sql.
    return webnotes.conn.sql("""select cc.name, cc.distribution_id,
        cc.parent_cost_center, bd.account, bd.budget_allocated
        from `tabCost Center` cc, `tabBudget Detail` bd
        where bd.parent=cc.name and bd.fiscal_year=%s and
        cc.company=%s order by cc.name""" % ('%s', '%s'),
        (filters.get("fiscal_year"), filters.get("company")), as_dict=1)
#Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
    """Map budget distribution name -> {month name -> percentage allocation}."""
    target_details = {}
    for d in webnotes.conn.sql("""select bd.name, bdd.month, bdd.percentage_allocation
        from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd
        where bdd.parent=bd.name and bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
        target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
    return target_details
#Get actual details from gl entry
def get_actual_details(filters):
    """Group GL entries (debit/credit plus posting month name) by cost center,
    then by account, restricted to accounts budgeted for that cost center."""
    ac_details = webnotes.conn.sql("""select gl.account, gl.debit, gl.credit,
        gl.cost_center, MONTHNAME(gl.posting_date) as month_name
        from `tabGL Entry` gl, `tabBudget Detail` bd
        where gl.fiscal_year=%s and company=%s
        and bd.account=gl.account and bd.parent=gl.cost_center""" % ('%s', '%s'),
        (filters.get("fiscal_year"), filters.get("company")), as_dict=1)
    cc_actual_details = {}
    for d in ac_details:
        cc_actual_details.setdefault(d.cost_center, {}).setdefault(d.account, []).append(d)
    return cc_actual_details
def get_costcenter_account_month_map(filters):
import datetime
costcenter_target_details = get_costcenter_target_details(filters)
tdd = get_target_distribution_details(filters)
actual_details = get_actual_details(filters)
cam_map = {}
for ccd in costcenter_target_details:
for month_id in range(1, 13):
month = datetime.date(2013, month_id, 1).strftime('%B')
cam_map.setdefault(ccd.name, {}).setdefault(ccd.account, {})\
.setdefault(month, webnotes._dict({
"target": 0.0, "actual": 0.0
}))
tav_dict = cam_map[ccd.name][ccd.account][month]
month_percentage = tdd.get(ccd.distribution_id, {}).get(month, 0) \
if ccd.distribution_id else 100.0/12
tav_dict.target = flt(ccd.budget_allocated) * month_percentage / 100
for ad in actual_details.get(ccd.name, {}).get(ccd.account, []):
if ad.month_name == month:
tav_dict.actual += ad.debit - ad.credit
return cam_map
``` |
[
{
"content": "Here is the code block:\n```python\n# -*- coding: utf-8 -*-\n# flake8: noqa\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Cluster'\n ... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# flake8: noqa\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model '... | ```python
# -*- coding: utf-8 -*-
# flake8: noqa
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Cluster'
db.create_table('documents_cluster', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('parameters', self.gf('django.db.models.fields.TextField')(default={})),
))
db.send_create_signal('documents', ['Cluster'])
# Adding M2M table for field documents on 'Cluster'
db.create_table('documents_cluster_documents', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('cluster', models.ForeignKey(orm['documents.cluster'], null=False)),
('document', models.ForeignKey(orm['documents.document'], null=False))
))
db.create_unique('documents_cluster_documents', ['cluster_id', 'document_id'])
def backwards(self, orm):
# Deleting model 'Cluster'
db.delete_table('documents_cluster')
# Removing M2M table for field documents on 'Cluster'
db.delete_table('documents_cluster_documents')
models = {
'documents.cluster': {
'Meta': {'object_name': 'Cluster'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['documents.Document']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '{}'})
},
'documents.document': {
'Meta': {'object_name': 'Document'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['documents']
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nimport os\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.template.defaultfilters import slugify\n\n\nclass RecordTracking(models.Model):\n user = models.CharField(\n null=True, blank=True,\n ... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nimport os\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.template.defaultfilters import slugify\n\n\nclass RecordTracking(models.Model):\n user = models.CharField(\n null=True... | ```python
import os
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
class RecordTracking(models.Model):
user = models.CharField(
null=True, blank=True,
max_length=64, verbose_name='Usuário')
time = models.DateTimeField(
null=True, blank=True,
auto_now_add=True, verbose_name='Hora')
table = models.CharField(
max_length=64, verbose_name='Tabela')
record_id = models.IntegerField(
db_index=True, verbose_name='Id do registro')
iud = models.CharField(
max_length=1, verbose_name='Ação')
# versão 1
# rt.log = dict_object
# versão 2
# rt.log = yaml
log_version = models.SmallIntegerField(
verbose_name='Versão do log', default=1)
log = models.CharField(
max_length=65535, verbose_name='Log')
class Meta:
db_table = "fo2_ger_record_tracking"
verbose_name = "log de registro"
verbose_name_plural = "logs de registro de tabela"
index_together = [
("time", "table", "record_id"),
]
indexes = [
models.Index(fields=['-id'], name='fo2_ger_record_track_desc_id'),
]
class Dispositivos(models.Model):
key = models.CharField(
max_length=64, verbose_name='Chave de identificação')
nome = models.CharField(
null=True, blank=True,
max_length=64, verbose_name='Nome do dispositivo')
def __str__(self):
if self.nome:
return self.nome
else:
return self.key
class Meta:
db_table = "fo2_ger_dispositivos"
verbose_name = "dispositivo"
class RoloBipado(models.Model):
dispositivo = models.ForeignKey(
Dispositivos, on_delete=models.CASCADE, null=True, blank=True)
rolo = models.IntegerField(
verbose_name='Rolo')
date = models.DateTimeField(
auto_now_add=True, blank=True, verbose_name='Data/Hora')
referencia = models.CharField(
max_length=5, verbose_name='Referência')
tamanho = models.CharField(
max_length=3, verbose_name='Tamanho')
cor = models.CharField(
max_length=6, verbose_name='Cor')
usuario = models.ForeignKey(
User, on_delete=models.CASCADE, null=True, blank=True,
verbose_name='usuário')
class Meta:
db_table = "fo2_ger_rolo_bipado"
verbose_name = "rolo bipado"
verbose_name_plural = "rolos bipados"
permissions = (("can_beep_rolo", "Can beep rolo"),)
class Painel(models.Model):
nome = models.CharField(
null=True, blank=True,
max_length=64)
slug = models.SlugField()
layout = models.CharField(
null=True, blank=True, max_length=4096,
verbose_name='receita')
habilitado = models.NullBooleanField(
default=True)
def __str__(self):
return self.nome
class Meta:
db_table = "fo2_ger_painel"
verbose_name = "painel"
verbose_name_plural = "paineis"
def save(self, *args, **kwargs):
self.slug = slugify(self.nome)
super(Painel, self).save(*args, **kwargs)
class PainelModulo(models.Model):
nome = models.CharField(
null=True, blank=True,
max_length=64)
slug = models.SlugField()
TIPOS_DE_MODULOS = (
('I', 'Informativo'),
('C', 'URL de cartaz em imagem'),
)
tipo = models.CharField(
max_length=1, choices=TIPOS_DE_MODULOS,
default='I')
habilitado = models.NullBooleanField(
default=True)
def __str__(self):
return self.nome
class Meta:
db_table = "fo2_ger_painel_modulo"
verbose_name = "modulo de painel"
verbose_name_plural = "modulos de painel"
def save(self, *args, **kwargs):
self.slug = slugify(self.nome)
super(PainelModulo, self).save(*args, **kwargs)
class UsuarioPainelModulo(models.Model):
usuario = models.ForeignKey(
User, on_delete=models.CASCADE,
verbose_name='usuário')
painel_modulo = models.ForeignKey(
PainelModulo, on_delete=models.CASCADE,
verbose_name='módulo de painel')
class Meta:
db_table = "fo2_ger_usr_pnl_modulo"
verbose_name = "usuário de modulo de painel"
verbose_name_plural = "usuários de modulos de painel"
class InformacaoModulo(models.Model):
usuario = models.ForeignKey(
User, on_delete=models.CASCADE,
verbose_name='usuário')
painel_modulo = models.ForeignKey(
PainelModulo, on_delete=models.CASCADE,
verbose_name='módulo de painel')
data = models.DateTimeField(
null=True, blank=True,
auto_now_add=True, verbose_name='Data')
chamada = models.CharField(
max_length=200, null=True, blank=True,
verbose_name='chamada')
habilitado = models.NullBooleanField(default=True)
texto = models.CharField(
null=True, blank=True, max_length=4096,
verbose_name='receita')
class Meta:
db_table = "fo2_ger_modulo_info"
class PopAssunto(models.Model):
nome = models.CharField(
max_length=255, blank=True)
slug = models.SlugField(default='slug')
grupo = models.CharField(
max_length=255, blank=True)
grupo_slug = models.SlugField(default='slug')
diretorio = models.CharField(
'diretório',
max_length=50, blank=True)
def __str__(self):
return self.nome
def save(self, *args, **kwargs):
self.slug = slugify(self.nome)
self.grupo_slug = slugify(self.grupo)
super(PopAssunto, self).save(*args, **kwargs)
class Meta:
db_table = "fo2_ger_pop_assunto"
def pop_upload_to(instance, filename):
return os.path.join('pop', instance.assunto.diretorio, filename)
class Pop(models.Model):
assunto = models.ForeignKey(
PopAssunto, on_delete=models.CASCADE, default=1,
verbose_name='assunto do POP')
descricao = models.CharField(
max_length=255, blank=True, verbose_name='título')
pop = models.FileField(upload_to=pop_upload_to, verbose_name='Arquivo POP')
uploaded_at = models.DateTimeField(
auto_now_add=True, verbose_name='Inserido em')
habilitado = models.NullBooleanField(default=True)
class Meta:
db_table = "fo2_ger_pop"
permissions = (("can_manage_pop", "Can manage pop"),)
class UsuarioPopAssunto(models.Model):
usuario = models.ForeignKey(
User, on_delete=models.CASCADE,
verbose_name='usuário')
assunto = models.ForeignKey(
PopAssunto, on_delete=models.CASCADE,
verbose_name='assunto de POP')
class Meta:
db_table = "fo2_ger_usr_pop_assunto"
verbose_name = "usuário de assunto de POP"
verbose_name_plural = "usuários de assuntos de POPs"
class TipoParametro(models.Model):
codigo = models.CharField(
max_length=5, unique=True, verbose_name='código')
descricao = models.CharField(
max_length=255, unique=True, verbose_name='descrição')
def __str__(self):
return '{} - {}'.format(self.codigo, self.descricao)
class Meta:
db_table = "fo2_param_tipo"
verbose_name = "Tipo de parâmetro"
verbose_name_plural = "Tipos de parâmetros"
class Parametro(models.Model):
codigo = models.CharField(
max_length=25, unique=True, verbose_name='código')
descricao = models.CharField(
max_length=255, unique=True, verbose_name='descrição')
tipo = models.ForeignKey(
TipoParametro, on_delete=models.CASCADE)
ajuda = models.CharField(
max_length=65535, null=True, blank=True)
habilitado = models.NullBooleanField(
default=True)
usuario = models.NullBooleanField(
default=True, verbose_name='usuário')
def __str__(self):
return '({}) {}'.format(self.codigo, self.descricao)
class Meta:
db_table = "fo2_parametro"
verbose_name = "Parâmetro"
class Config(models.Model):
parametro = models.ForeignKey(
Parametro, on_delete=models.CASCADE)
usuario = models.ForeignKey(
User, on_delete=models.CASCADE, null=True, blank=True,
verbose_name='usuário')
valor = models.CharField(
max_length=255)
class Meta:
db_table = "fo2_config"
verbose_name = "Configuração"
verbose_name_plural = "Configurações"
``` |
[
{
"content": "```python\n\"\"\"Import and export collision data\"\"\"\n\nclass TestBhkCollisionSphereShape(TestBaseGeometry, TestBhkCollision):\n n_name = \"collisions/base_bhkcollision_sphere\" #name of nif\n b_name = \"Cube\" #name of blender mesh object\n\n def b_create_object(self):\n b_obj ... | [
{
"content": "<|memory_start|>```python\n\"\"\"Import and export collision data\"\"\"\n\nclass TestBhkCollisionSphereShape(TestBaseGeometry, TestBhkCollision):\n n_name = \"collisions/base_bhkcollision_sphere\" #name of nif\n b_name = \"Cube\" #name of blender mesh object\n\n def b_create_object(self):... | ```python
"""Import and export collision data"""
class TestBhkCollisionSphereShape(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_sphere" #name of nif
b_name = "Cube" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self)
bpy.ops.mesh.primitive_uv_sphere_add()
b_coll = bpy.data.objects["Sphere"]
b_coll.data.show_double_sided = False
b_coll.name = "CollisionSphere"
b_coll = bpy.data.objects["CollisionSphere"]
class TestBhkCollisionTriangleShape(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_triangle" #name of nif
b_name = "CubeObject" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self, self.b_name)
bpy.ops.mesh.primitive_cube_add()
b_coll = bpy.data.objects["Cube"]
b_coll.data.show_double_sided = False
b_coll.name = "CollisionTriangles"
b_coll = bpy.data.objects["CollisionTriangles"]
def b_check_geom(self, b_mesh):
if b_mesh.name == "poly0":
nose.tools.assert_equal(len(b_mesh.vertices), 8)
class TestBhkCapsuleObject(TestBaseGeometry, TestBhkCollision):
n_name = "collisions/base_bhkcollision_capsule" #name of nif
b_name = "Cube" #name of blender mesh object
def b_create_object(self):
b_obj = TestBaseGeometry.b_create_object(self, self.b_name)
bpy.ops.mesh.primitive_cylinder_add(vertices=8,radius=1.2,depth=2)
b_coll = bpy.context.active_object
b_coll.data.show_double_sided = False
b_coll.name = "CollisionCapsule"
b_coll = bpy.data.objects["CollisionCapsule"]
b_coll.draw_type = 'WIRE'
``` |
[
{
"content": "```python\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nfrom pycpd import AffineRegistration\n\n\ndef test_2D():\n B = np.array([[1.0, 0.5], [0, 1.0]])\n t = np.array([0.5, 1.0])\n\n Y = np.loadtxt('data/fish_target.txt')\n ... | [
{
"content": "<|memory_start|>```python\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nfrom pycpd import AffineRegistration\n\n\ndef test_2D():\n B = np.array([[1.0, 0.5], [0, 1.0]])\n t = np.array([0.5, 1.0])\n\n Y = np.loadtxt('data/fish_... | ```python
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from pycpd import AffineRegistration
def test_2D():
B = np.array([[1.0, 0.5], [0, 1.0]])
t = np.array([0.5, 1.0])
Y = np.loadtxt('data/fish_target.txt')
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
def test_3D():
B = np.array([[1.0, 0.5, 0.0], [0, 1.0, 0.0], [0.0, 0.0, 1.0]])
t = np.array([0.5, 1.0, -2.0])
fish_target = np.loadtxt('data/fish_target.txt')
Y1 = np.zeros((fish_target.shape[0], fish_target.shape[1] + 1))
Y1[:, :-1] = fish_target
Y2 = np.ones((fish_target.shape[0], fish_target.shape[1] + 1))
Y2[:, :-1] = fish_target
Y = np.vstack((Y1, Y2))
X = np.dot(Y, B) + np.tile(t, (np.shape(Y)[0], 1))
reg = AffineRegistration(**{'X': X, 'Y': Y})
TY, (B_reg, t_reg) = reg.register()
assert_array_almost_equal(B, B_reg)
assert_array_almost_equal(t, t_reg)
assert_array_almost_equal(X, TY)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'DatabaseMigrate.origin... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'Databa... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DatabaseMigrate.origin_environment'
db.add_column(u'maintenance_databasemigrate', 'origin_environment',
self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['physical.Environment']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DatabaseMigrate.origin_environment'
db.delete_column(u'maintenance_databasemigrate', 'origin_environment_id')
models = {
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'backup.backupgroup': {
'Meta': {'object_name': 'BackupGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'logical.database': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'},
'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_auto_resize': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}),
'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'quarantine_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_quarantine'", 'null': 'True', 'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
u'logical.project': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasechangeparameter': {
'Meta': {'object_name': 'DatabaseChangeParameter'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'change_parameters'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_change_parameters'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseconfiguressl': {
'Meta': {'object_name': 'DatabaseConfigureSSL'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'configure_ssl'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_configure_ssl'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasecreate': {
'Meta': {'object_name': 'DatabaseCreate'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['logical.Database']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.DatabaseInfra']"}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Project']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'create_database'", 'to': u"orm['notification.TaskHistory']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'maintenance.databasedestroy': {
'Meta': {'object_name': 'DatabaseDestroy'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_destroy'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['logical.Database']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_destroy'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_destroy'", 'to': u"orm['physical.DatabaseInfra']"}),
'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_destroy'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_destroy'", 'null': 'True', 'to': u"orm['logical.Project']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_destroy'", 'to': u"orm['notification.TaskHistory']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_destroy'", 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'maintenance.databasemigrate': {
'Meta': {'object_name': 'DatabaseMigrate'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_migrate'", 'to': u"orm['logical.Database']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_migrate'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_migrate'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databasereinstallvm': {
'Meta': {'object_name': 'DatabaseReinstallVM'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'reinstall_vm'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinstall_vm'", 'to': u"orm['physical.Instance']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinsgtall_vm'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseresize': {
'Meta': {'object_name': 'DatabaseResize'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'resizes'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_offer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_resizes_source'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Offering']"}),
'source_offer_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_offer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_resizes_target'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Offering']"}),
'target_offer_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestore': {
'Meta': {'object_name': 'DatabaseRestore'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['backup.BackupGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_restore_new'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaserestoreinstancepair': {
'Meta': {'unique_together': "((u'master', u'slave', u'restore'),)", 'object_name': 'DatabaseRestoreInstancePair'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_master'", 'to': u"orm['physical.Instance']"}),
'restore': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_instances'", 'to': u"orm['maintenance.DatabaseRestore']"}),
'slave': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_slave'", 'to': u"orm['physical.Instance']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.databaseupgrade': {
'Meta': {'object_name': 'DatabaseUpgrade'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'upgrades'", 'to': u"orm['logical.Database']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_source'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'source_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'target_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_target'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'target_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_upgrades'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.hostmaintenance': {
'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}),
'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.hostmigrate': {
'Meta': {'object_name': 'HostMigrate'},
'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'database_migrate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'hosts'", 'null': 'True', 'to': u"orm['maintenance.DatabaseMigrate']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_migrate'", 'to': u"orm['physical.Environment']"}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'migrate'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_migrate'", 'to': u"orm['notification.TaskHistory']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zone': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'maintenance.maintenance': {
'Meta': {'object_name': 'Maintenance'},
'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'disable_alarms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_script': ('django.db.models.fields.TextField', [], {}),
'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'revoked_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'maintenance.maintenanceparameters': {
'Meta': {'object_name': 'MaintenanceParameters'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}),
'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'notification.taskhistory': {
'Meta': {'object_name': 'TaskHistory'},
'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'relevance': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '1'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'task_status': ('django.db.models.fields.CharField', [], {'default': "u'WAITING'", 'max_length': '100', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['maintenance']
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\"INSTEON Climate Control Device Class.\"\"\"\nimport logging\n\nfrom insteonplm.devices import Device\nfrom insteonplm.constants import COMMAND_EXTENDED_GET_SET_0X2E_0X00\nfrom insteonplm.messages.extendedSend import ExtendedSend\nfrom insteonplm.messages.user... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"INSTEON Climate Control Device Class.\"\"\"\nimport logging\n\nfrom insteonplm.devices import Device\nfrom insteonplm.constants import COMMAND_EXTENDED_GET_SET_0X2E_0X00\nfrom insteonplm.messages.extendedSend import ExtendedSend\nfrom insteonp... | ```python
"""INSTEON Climate Control Device Class."""
import logging
from insteonplm.devices import Device
from insteonplm.constants import COMMAND_EXTENDED_GET_SET_0X2E_0X00
from insteonplm.messages.extendedSend import ExtendedSend
from insteonplm.messages.userdata import Userdata
from insteonplm.states.thermostat import (
Temperature,
Humidity,
SystemMode,
FanMode,
CoolSetPoint,
HeatSetPoint,
)
from insteonplm.states.statusReport import StatusReport
# Module-level logger, one per module per stdlib logging convention.
_LOGGER = logging.getLogger(__name__)
class ClimateControl_2441th(Device):
    """Thermostat model 2441TH.

    Exposes the thermostat's set points and status report through the
    device state list, plus mode/sensor states held as attributes.
    """
    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the ClimateControl_2441th Class."""
        Device.__init__(
            self, plm, address, cat, subcat, product_key, description, model
        )
        # States registered in the device state list, keyed by group number.
        self._stateList[0x01] = CoolSetPoint(
            self._address,
            "coolSetPoint",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._stateList[0x02] = HeatSetPoint(
            self._address,
            "heatSetPoint",
            0x02,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        # Group 0xEF carries the aggregated status report.
        self._stateList[0xEF] = StatusReport(
            self._address,
            "statusReport",
            0xEF,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        # Mode and sensor states kept as plain attributes rather than in
        # _stateList — presumably surfaced only via the properties below;
        # TODO(review): confirm they need no state-list registration.
        self._system_mode = SystemMode(
            self._address,
            "systemMode",
            0x10,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._fan_mode = FanMode(
            self._address,
            "fanMode",
            0x11,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._temp = Temperature(
            self._address,
            "temperature",
            0x12,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._humidity = Humidity(
            self._address,
            "humidity",
            0x13,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
    @property
    def cool_set_point(self):
        """Return the cool set point state."""
        return self._stateList[0x01]
    @property
    def heat_set_point(self):
        """Return the heat set point state."""
        return self._stateList[0x02]
    @property
    def system_mode(self):
        """Return the system mode state."""
        return self._system_mode
    @property
    def fan_mode(self):
        """Return the fan mode state."""
        return self._fan_mode
    @property
    def temperature(self):
        """Return the temperature state."""
        return self._temp
    @property
    def humidity(self):
        """Return the humidity state."""
        return self._humidity
    def async_refresh_state(self):
        """Request each state to provide status update."""
        _LOGGER.debug("Setting up extended status")
        # Extended GET/SET with cmd2=0x02 — presumably requests the full
        # thermostat status report; confirm against the 2441TH dev guide.
        ext_status = ExtendedSend(
            address=self._address,
            commandtuple=COMMAND_EXTENDED_GET_SET_0X2E_0X00,
            cmd2=0x02,
            userdata=Userdata(),
        )
        # Extended messages require a CRC before sending.
        ext_status.set_crc()
        _LOGGER.debug("Sending ext status: %s", ext_status)
        self._send_msg(ext_status)
        _LOGGER.debug("Sending temp status request")
        self.temperature.async_refresh_state()
    # pylint: disable=unused-argument
    def _mode_changed(self, addr, group, val):
        # Any mode change triggers a full status refresh; the callback
        # arguments themselves are not needed.
        self.async_refresh_state()
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom bamslips.fund.fund_realtime_info import paralle_get_fund_realtime_info_according_fund_code\nfrom bamslips.conf.fund_code_list import code_list\n\n\nsched = BlockingScheduler()\n\n#@sched.s... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom bamslips.fund.fund_realtime_info import paralle_get_fund_realtime_info_according_fund_code\nfrom bamslips.conf.fund_code_list import code_list\n\n\nsched = BlockingSchedule... | ```python
from apscheduler.schedulers.blocking import BlockingScheduler
from bamslips.fund.fund_realtime_info import paralle_get_fund_realtime_info_according_fund_code
from bamslips.conf.fund_code_list import code_list
sched = BlockingScheduler()

# Previous single-job variant, kept for reference:
#@sched.scheduled_job('cron',id='fund_realtime_fun_info_job',hour='0-4,9-16,21-24',minute="*/10")
#def sch_append_fund_realtime_info():
#    paralle_get_fund_realtime_info_according_fund_code()

def _refresh_code_slice(offset):
    """Fetch realtime info for every 4th fund code, starting at *offset*.

    The full code list is split into four interleaved slices so that four
    jobs firing one minute apart spread the scraping load.
    """
    paralle_get_fund_realtime_info_according_fund_code(code_list[offset::4])

# NOTE(review): APScheduler cron hours run 0-23; '21-24' looks out of range
# and may be rejected at registration time -- confirm against the scheduler
# version in use.
@sched.scheduled_job('cron', id='fund_realtime_fun_info_job_0', hour='0-4,9-16,21-24', minute='0/10')
def sch_append_fund_realtime_info_0():
    _refresh_code_slice(0)

@sched.scheduled_job('cron', id='fund_realtime_fun_info_job_1', hour='0-4,9-16,21-24', minute='1/10')
def sch_append_fund_realtime_info_1():
    _refresh_code_slice(1)

@sched.scheduled_job('cron', id='fund_realtime_fun_info_job_2', hour='0-4,9-16,21-24', minute='2/10')
def sch_append_fund_realtime_info_2():
    _refresh_code_slice(2)

@sched.scheduled_job('cron', id='fund_realtime_fun_info_job_3', hour='0-4,9-16,21-24', minute='3/10')
def sch_append_fund_realtime_info_3():
    _refresh_code_slice(3)

sched.start()
``` |
[
{
"content": "Here is some code:\n```python\n# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport base64\nimport logging\nimport psycopg2\nimport werkzeug.utils\nimport werkzeug.wrappers\n\nfrom werkzeug.urls import url_encode\n\nfrom odoo import api, htt... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport base64\nimport logging\nimport psycopg2\nimport werkzeug.utils\nimport werkzeug.wrappers\n\nfrom werkzeug.urls import url_encode\n\nfrom odoo... | ```python
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import logging
import psycopg2
import werkzeug.utils
import werkzeug.wrappers
from werkzeug.urls import url_encode
from odoo import api, http, registry, SUPERUSER_ID, _
from odoo.exceptions import AccessError
from odoo.http import request
from odoo.tools import consteq
_logger = logging.getLogger(__name__)
class MailController(http.Controller):
    _cp_path = '/mail'
    @classmethod
    def _redirect_to_messaging(cls):
        """Redirect the browser to the Discuss (messaging) client action."""
        url = '/web#%s' % url_encode({'action': 'mail.action_discuss'})
        return werkzeug.utils.redirect(url)
    @classmethod
    def _check_token(cls, token):
        """Return True when *token* matches the signature of the current route.

        The expected token is recomputed from the request path and params
        (token excluded) and compared in constant time to avoid timing leaks.
        """
        base_link = request.httprequest.path
        params = dict(request.params)
        params.pop('token', '')
        valid_token = request.env['mail.thread']._notify_encode_link(base_link, params)
        return consteq(valid_token, str(token))
    @classmethod
    def _check_token_and_record_or_redirect(cls, model, res_id, token):
        """Validate *token*, then look up ``model``/``res_id``.

        Returns a ``(token_valid, record_or_None, redirect)`` triple; the
        redirect targets the record when the lookup succeeds, and the
        messaging app otherwise.
        """
        comparison = cls._check_token(token)
        if not comparison:
            _logger.warning(_('Invalid token in route %s', request.httprequest.url))
            return comparison, None, cls._redirect_to_messaging()
        try:
            record = request.env[model].browse(res_id).exists()
        except Exception:
            record = None
            redirect = cls._redirect_to_messaging()
        else:
            redirect = cls._redirect_to_record(model, res_id)
        return comparison, record, redirect
    @classmethod
    def _redirect_to_record(cls, model, res_id, access_token=None, **kwargs):
        """Build the best redirect response for a record reached from a mail link."""
        # access_token and kwargs are used in the portal controller override for the Send by email or Share Link
        # to give access to the record to a recipient that has normally no access.
        uid = request.session.uid
        user = request.env['res.users'].sudo().browse(uid)
        cids = False
        # no model / res_id, meaning no possible record -> redirect to login
        if not model or not res_id or model not in request.env:
            return cls._redirect_to_messaging()
        # find the access action using sudo to have the details about the access link
        RecordModel = request.env[model]
        record_sudo = RecordModel.sudo().browse(res_id).exists()
        if not record_sudo:
            # record does not seem to exist -> redirect to login
            return cls._redirect_to_messaging()
        # the record has a window redirection: check access rights
        if uid is not None:
            if not RecordModel.with_user(uid).check_access_rights('read', raise_exception=False):
                return cls._redirect_to_messaging()
            try:
                # We need here to extend the "allowed_company_ids" to allow a redirection
                # to any record that the user can access, regardless of currently visible
                # records based on the "currently allowed companies".
                cids = request.httprequest.cookies.get('cids', str(user.company_id.id))
                cids = [int(cid) for cid in cids.split(',')]
                try:
                    record_sudo.with_user(uid).with_context(allowed_company_ids=cids).check_access_rule('read')
                except AccessError:
                    # In case the allowed_company_ids from the cookies (i.e. the last user
                    # configuration on his browser) is not sufficient to avoid an ir.rule
                    # access error, try the following heuristic:
                    # - Guess the supposed necessary company to access the record via
                    #   _get_mail_redirect_suggested_company
                    # - If no company, then redirect to the messaging
                    # - Merge the suggested company with the companies on the cookie
                    # - Make a new access test; if it succeeds, redirect to the record.
                    #   Otherwise, redirect to the messaging.
                    suggested_company = record_sudo._get_mail_redirect_suggested_company()
                    if not suggested_company:
                        raise AccessError('')
                    cids = cids + [suggested_company.id]
                    record_sudo.with_user(uid).with_context(allowed_company_ids=cids).check_access_rule('read')
            except AccessError:
                return cls._redirect_to_messaging()
            else:
                record_action = record_sudo.get_access_action(access_uid=uid)
        else:
            record_action = record_sudo.get_access_action()
        if record_action['type'] == 'ir.actions.act_url' and record_action.get('target_type') != 'public':
            return cls._redirect_to_messaging()
        record_action.pop('target_type', None)
        # the record has an URL redirection: use it directly
        if record_action['type'] == 'ir.actions.act_url':
            return werkzeug.utils.redirect(record_action['url'])
        # other choice: act_window (no support of anything else currently)
        elif not record_action['type'] == 'ir.actions.act_window':
            return cls._redirect_to_messaging()
        url_params = {
            'model': model,
            'id': res_id,
            'active_id': res_id,
            'action': record_action.get('id'),
        }
        view_id = record_sudo.get_formview_id()
        if view_id:
            url_params['view_id'] = view_id
        if cids:
            url_params['cids'] = ','.join([str(cid) for cid in cids])
        url = '/web?#%s' % url_encode(url_params)
        return werkzeug.utils.redirect(url)
    @http.route('/mail/read_followers', type='json', auth='user')
    def read_followers(self, follower_ids):
        """Return follower details for *follower_ids* plus, when the current
        user is one of them, that follower's subtype subscription data."""
        request.env['mail.followers'].check_access_rights("read")
        follower_recs = request.env['mail.followers'].sudo().browse(follower_ids)
        res_ids = follower_recs.mapped('res_id')
        res_models = set(follower_recs.mapped('res_model'))
        if len(res_models) > 1:
            raise AccessError(_("Can't read followers with different targeted model"))
        res_model = res_models.pop()
        request.env[res_model].check_access_rights("read")
        request.env[res_model].browse(res_ids).check_access_rule("read")
        followers = []
        follower_id = None
        for follower in follower_recs:
            if follower.partner_id == request.env.user.partner_id:
                follower_id = follower.id
            followers.append({
                'id': follower.id,
                'partner_id': follower.partner_id.id,
                'channel_id': follower.channel_id.id,
                'name': follower.name,
                'email': follower.email,
                'is_active': follower.is_active,
                # When editing the followers, the "pencil" icon that leads to the edition of subtypes
                # should always be displayed, and not only when "debug" mode is activated.
                'is_editable': True
            })
        return {
            'followers': followers,
            'subtypes': self.read_subscription_data(follower_id) if follower_id else None
        }
    @http.route('/mail/read_subscription_data', type='json', auth='user')
    def read_subscription_data(self, follower_id):
        """ Computes:
            - message_subtype_data: data about document subtypes: which are
              available, which are followed if any """
        request.env['mail.followers'].check_access_rights("read")
        follower = request.env['mail.followers'].sudo().browse(follower_id)
        follower.ensure_one()
        request.env[follower.res_model].check_access_rights("read")
        request.env[follower.res_model].browse(follower.res_id).check_access_rule("read")
        # find current model subtypes, add them to a dictionary
        subtypes = request.env['mail.message.subtype'].search([
            '&', ('hidden', '=', False),
            '|', ('res_model', '=', follower.res_model), ('res_model', '=', False)])
        followed_subtypes_ids = set(follower.subtype_ids.ids)
        subtypes_list = [{
            'name': subtype.name,
            'res_model': subtype.res_model,
            'sequence': subtype.sequence,
            'default': subtype.default,
            'internal': subtype.internal,
            'followed': subtype.id in followed_subtypes_ids,
            'parent_model': subtype.parent_id.res_model,
            'id': subtype.id
        } for subtype in subtypes]
        return sorted(subtypes_list,
                      key=lambda it: (it['parent_model'] or '', it['res_model'] or '', it['internal'], it['sequence']))
    @http.route('/mail/view', type='http', auth='public')
    def mail_action_view(self, model=None, res_id=None, access_token=None, **kwargs):
        """ Generic access point from notification emails. The heuristic to
        choose where to redirect the user is the following :

         - find a public URL
         - if none found
          - users with a read access are redirected to the document
          - users without read access are redirected to the Messaging
          - not logged users are redirected to the login page

        models that have an access_token may apply variations on this.
        """
        # ==============================================================================================
        # This block of code disappeared on saas-11.3 to be reintroduced by TBE.
        # This is needed because after a migration from an older version to saas-11.3, the link
        # received by mail with a message_id no longer work.
        # So this block of code is needed to guarantee the backward compatibility of those links.
        if kwargs.get('message_id'):
            try:
                message = request.env['mail.message'].sudo().browse(int(kwargs['message_id'])).exists()
            except:
                message = request.env['mail.message']
            if message:
                model, res_id = message.model, message.res_id
        # ==============================================================================================
        if res_id and isinstance(res_id, str):
            res_id = int(res_id)
        return self._redirect_to_record(model, res_id, access_token, **kwargs)
    @http.route('/mail/assign', type='http', auth='user', methods=['GET'])
    def mail_action_assign(self, model, res_id, token=None):
        """Assign the current user to the record referenced by a signed link."""
        comparison, record, redirect = self._check_token_and_record_or_redirect(model, int(res_id), token)
        if comparison and record:
            try:
                record.write({'user_id': request.uid})
            except Exception:
                return self._redirect_to_messaging()
        return redirect
    @http.route('/mail/<string:res_model>/<int:res_id>/avatar/<int:partner_id>', type='http', auth='public')
    def avatar(self, res_model, res_id, partner_id):
        """Serve the avatar of a message author, or a 1x1 white PNG fallback."""
        headers = [('Content-Type', 'image/png')]
        status = 200
        content = 'R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' # default image is one white pixel
        if res_model in request.env:
            try:
                # if the current user has access to the document, get the partner avatar as sudo()
                request.env[res_model].browse(res_id).check_access_rule('read')
                if partner_id in request.env[res_model].browse(res_id).sudo().exists().message_ids.mapped('author_id').ids:
                    status, headers, _content = request.env['ir.http'].sudo().binary_content(
                        model='res.partner', id=partner_id, field='image_128', default_mimetype='image/png')
                    # binary_content returns an empty string, not a placeholder, if obj[field] is False
                    if _content != '':
                        content = _content
                    if status == 304:
                        return werkzeug.wrappers.Response(status=304)
            except AccessError:
                pass
        image_base64 = base64.b64decode(content)
        headers.append(('Content-Length', len(image_base64)))
        response = request.make_response(image_base64, headers)
        response.status = str(status)
        return response
    @http.route('/mail/needaction', type='json', auth='user')
    def needaction(self):
        """Return the needaction (inbox) counter for the current partner."""
        return request.env['res.partner'].get_needaction_count()
    @http.route('/mail/init_messaging', type='json', auth='user')
    def mail_init_messaging(self):
        """Return the initial payload the Discuss web client needs at startup."""
        values = {
            'needaction_inbox_counter': request.env['res.partner'].get_needaction_count(),
            'starred_counter': request.env['res.partner'].get_starred_count(),
            'channel_slots': request.env['mail.channel'].channel_fetch_slot(),
            'mail_failures': request.env['mail.message'].message_fetch_failed(),
            'commands': request.env['mail.channel'].get_mention_commands(),
            'mention_partner_suggestions': request.env['res.partner'].get_static_mention_suggestions(),
            'shortcodes': request.env['mail.shortcode'].sudo().search_read([], ['source', 'substitution', 'description']),
            'menu_id': request.env['ir.model.data'].xmlid_to_res_id('mail.menu_root_discuss'),
            'is_moderator': request.env.user.is_moderator,
            'moderation_counter': request.env.user.moderation_counter,
            'moderation_channel_ids': request.env.user.moderation_channel_ids.ids,
            'partner_root': request.env.ref('base.partner_root').sudo().mail_partner_format(),
            'public_partner': request.env.ref('base.public_partner').sudo().mail_partner_format(),
        }
        return values
    @http.route('/mail/get_partner_info', type='json', auth='user')
    def message_partner_info_from_emails(self, model, res_ids, emails, link_mail=False):
        """Resolve *emails* to partner info in the context of the given records."""
        records = request.env[model].browse(res_ids)
        try:
            records.check_access_rule('read')
            records.check_access_rights('read')
        except:
            return []
        return records._message_partner_info_from_emails(emails, link_mail=link_mail)
    @http.route('/mail/get_suggested_recipients', type='json', auth='user')
    def message_get_suggested_recipients(self, model, res_ids):
        """Return suggested additional recipients for each of the given records."""
        records = request.env[model].browse(res_ids)
        try:
            records.check_access_rule('read')
            records.check_access_rights('read')
        except:
            return {}
        return records._message_get_suggested_recipients()
``` |
[
{
"content": "```python\n\"\"\"\nResystem Scheduling Service.\nReleased under New BSD License.\nCopyright © 2015, Vadim Markovtsev :: Angry Developers LLC\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nResystem Scheduling Service.\nReleased under New BSD License.\nCopyright © 2015, Vadim Markovtsev :: Angry Developers LLC\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the foll... | ```python
"""
Resystem Scheduling Service.
Released under New BSD License.
Copyright © 2015, Vadim Markovtsev :: Angry Developers LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Angry Developers LLC nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VADIM MARKOVTSEV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from setuptools import setup
import os
def parse_requirements(path=None):
    """Parse a pip requirements file into a list for ``install_requires``.

    Args:
        path: Requirements file to read. Defaults to ``requirements.txt``
            next to this setup script.

    Returns:
        List of requirement strings. Comments, blank lines and ``-r``
        include lines are skipped; ``git+`` URLs are skipped with a warning
        because setuptools cannot consume them.
    """
    if path is None:
        path = os.path.join(os.path.dirname(__file__), "requirements.txt")
    reqs = []
    with open(path, "r") as fin:
        for line in fin.read().split("\n"):
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if line.startswith("git+"):
                print("Warning: git dependencies cannot be used in setuptools "
                      "(%s)" % line)
                continue
            if not line.startswith("-r"):
                reqs.append(line)
    return reqs
# Distribution metadata. install_requires is computed at build time from
# requirements.txt via parse_requirements().
setup(
    name="res-scheduling",
    description="Resystem Scheduling Service",
    version="1.0.2",
    license="New BSD",
    author="Vadim Markovtsev",
    author_email="gmarkhor@gmail.com",
    url="https://github.com/AngryDevelopersLLC/res-scheduler",
    download_url='https://github.com/AngryDevelopersLLC/res-scheduler',
    packages=["res.scheduling"],
    install_requires=parse_requirements(),
    # NOTE(review): with the "" (all-packages) key these paths are resolved
    # relative to each package directory; verify they actually match files
    # shipped under res/scheduling -- they look repo-root-relative.
    package_data={"": [
        'res/scheduling/requirements/base.txt',
        'res/scheduling/res_scheduling.service',
        'res/scheduling/run.sh']},
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ]
)
``` |
[
{
"content": "Recreate the original code text:\n```python\n#! /usr/bin/env python3\n\nimport sys, os\nimport unittest\nimport yaml\nimport json\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport tammy\n\nclass PeerJ(unittest.TestCase):\n def test_pubtype(self):\n self.assertRais... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n#! /usr/bin/env python3\n\nimport sys, os\nimport unittest\nimport yaml\nimport json\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport tammy\n\nclass PeerJ(unittest.TestCase):\n def test_pubtype(self):\n ... | ```python
#! /usr/bin/env python3
import sys, os
import unittest
import yaml
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import tammy
class PeerJ(unittest.TestCase):
    """Exercise tammy.from_peerj against the live PeerJ API."""

    def test_pubtype(self):
        """An unknown publication type is rejected."""
        with self.assertRaises(ValueError):
            tammy.from_peerj(200, 'paper')

    def test_argtype(self):
        """A non-integer identifier is rejected."""
        with self.assertRaises(TypeError):
            tammy.from_peerj(['200'], 'article')

    def test_works_for_paper(self):
        """A known article resolves to its first author's family name."""
        record = tammy.from_peerj(251, 'article')
        self.assertEqual(record[u'author'][0][u'family'], u'Poisot')

    def test_no_id(self):
        """Identifier 0 is rejected."""
        with self.assertRaises(ValueError):
            tammy.from_peerj(0, 'article')

    def test_works_for_preprint(self):
        """A known preprint resolves to its first author's family name."""
        record = tammy.from_peerj(50, 'preprint')
        self.assertEqual(record[u'author'][0][u'family'], u'Poisot')
class CrossRef(unittest.TestCase):
    """Exercise tammy.from_crossref_doi against the CrossRef API."""

    def test_correct_doi(self):
        """A valid DOI yields a dict of citation metadata."""
        record = tammy.from_crossref_doi('10.7717/peerj.426')
        self.assertIsInstance(record, dict)

    def test_no_doi(self):
        """A malformed DOI is rejected."""
        self.assertRaises(ValueError, tammy.from_crossref_doi, 'no/no/no')
def main():
    """Run the test suite with verbose output.

    The original branched on ``sys.version_info[1] < 7`` but executed the
    identical ``unittest.main(verbosity=2)`` call in both branches, so the
    dead branch is collapsed.
    """
    unittest.main(verbosity=2)
# Entry point: run the tests when this module is executed as a script.
if __name__ == '__main__':
    main()
``` |
[
{
"content": "Here is the source code:\n```python\ndef find_rigid_transform(a, b, visualize=False):\n \"\"\"\n Args:\n a: a 3xN array of vertex locations\n b: a 3xN array of vertex locations\n\n Returns: (R,T) such that R.dot(a)+T ~= b\n Based on Arun et al, \"Least-squares fitting of ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\ndef find_rigid_transform(a, b, visualize=False):\n \"\"\"\n Args:\n a: a 3xN array of vertex locations\n b: a 3xN array of vertex locations\n\n Returns: (R,T) such that R.dot(a)+T ~= b\n Based on Arun et al, \"Least-squ... | ```python
def find_rigid_transform(a, b, visualize=False):
    """
    Find the rigid (rotation + translation) transform aligning two point sets.

    Args:
        a: a 3xN array of vertex locations
        b: a 3xN array of vertex locations
        visualize: False for no display; True to open a new MeshViewer;
            or an existing viewer instance to draw into.

    Returns: (R,T) such that R.dot(a)+T ~= b

    Based on Arun et al, "Least-squares fitting of two 3-D point sets," 1987.
    See also Eggert et al, "Estimating 3-D rigid body transformations: a
    comparison of four major algorithms," 1997.
    """
    import numpy as np
    import scipy.linalg
    from blmath.numerics.matlab import col

    # Accept Nx3 input by transposing it into the expected 3xN layout.
    if a.shape[0] != 3 and a.shape[1] == 3:
        a = a.T
    if b.shape[0] != 3 and b.shape[1] == 3:
        b = b.T
    assert a.shape[0] == 3
    assert b.shape[0] == 3

    # Center both clouds, then correlate the deviations.
    centroid_a = np.mean(a, axis=1)
    centroid_b = np.mean(b, axis=1)
    deviations_a = a - col(centroid_a)
    deviations_b = b - col(centroid_b)
    cross_cov = deviations_a.dot(deviations_b.T)

    u, singular_values, vt = np.linalg.svd(cross_cov, full_matrices=False)
    v = vt.T
    R = v.dot(u.T)

    if scipy.linalg.det(R) < 0:
        # Negative determinant: the SVD produced a reflection, not a rotation.
        if np.any(singular_values == 0):  # This is only valid in the noiseless case; see the paper
            v[:, 2] = -v[:, 2]
            R = v.dot(u.T)
        else:
            raise ValueError("find_rigid_transform found a reflection that it cannot recover from. Try RANSAC or something...")

    T = col(centroid_b - R.dot(centroid_a))

    if visualize != False:
        from lace.mesh import Mesh
        from lace.meshviewer import MeshViewer
        viewer = MeshViewer() if visualize is True else visualize
        aligned = R.dot(a) + T
        viewer.set_dynamic_meshes([
            Mesh(v=a.T, f=[]).set_vertex_colors('red'),
            Mesh(v=b.T, f=[]).set_vertex_colors('green'),
            Mesh(v=aligned.T, f=[]).set_vertex_colors('orange'),
        ])
    return R, T
def find_rigid_rotation(a, b, allow_scaling=False):
    """
    Find the rotation best mapping one point set onto another.

    Args:
        a: a 3xN array of vertex locations
        b: a 3xN array of vertex locations
        allow_scaling: when True, additionally scale by norm(b)/norm(a).

    Returns: R such that R.dot(a) ~= b

    See link: http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
    """
    import numpy as np
    import scipy.linalg
    from blmath.numerics.matlab import col

    assert a.shape[0] == 3
    assert b.shape[0] == 3

    # A single point pair under-determines the rotation; augment both sides
    # with their cross product as a second, consistent correspondence.
    if a.size == 3:
        perpendicular = np.cross(a.ravel(), b.ravel())
        a = np.hstack((col(a), col(perpendicular)))
        b = np.hstack((col(b), col(perpendicular)))

    correlation = a.dot(b.T)
    u, _, vt = np.linalg.svd(correlation, full_matrices=False)
    v = vt.T
    R = v.dot(u.T)

    if scipy.linalg.det(R) < 0:
        # Flip the last axis to turn a reflection into a proper rotation.
        v[:, 2] = -v[:, 2]
        R = v.dot(u.T)

    if allow_scaling:
        scalefactor = scipy.linalg.norm(b) / scipy.linalg.norm(a)
        R = R * scalefactor
    return R
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for li... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the pro... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Optional per-call callback applied to each response as
# cls(pipeline_response, deserialized, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TestSummariesOperations:
    """TestSummariesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~test_base.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name: str,
        test_base_account_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.TestSummaryListResult"]:
        """Lists the Test Summaries of all the packages under a Test Base Account.
        :param resource_group_name: The name of the resource group that contains the resource.
        :type resource_group_name: str
        :param test_base_account_name: The resource name of the Test Base Account.
        :type test_base_account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TestSummaryListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~test_base.models.TestSummaryListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TestSummaryListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-16-preview"
        accept = "application/json"
        # Builds either the first-page request (from the route template) or a
        # follow-up request from the service-provided next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                # NOTE(review): no api-version is re-added here; presumably the
                # next_link URL already embeds its query string -- confirm.
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Split one page into (link-to-next-page, items); next link is None when exhausted.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('TestSummaryListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page, raising a typed error on any non-200 response.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        test_base_account_name: str,
        test_summary_name: str,
        **kwargs: Any
    ) -> "_models.TestSummaryResource":
        """Gets a Test Summary with specific name from all the Test Summaries of all the packages under a
        Test Base Account.
        :param resource_group_name: The name of the resource group that contains the resource.
        :type resource_group_name: str
        :param test_base_account_name: The resource name of the Test Base Account.
        :type test_base_account_name: str
        :param test_summary_name: The name of the Test Summary.
        :type test_summary_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TestSummaryResource, or the result of cls(response)
        :rtype: ~test_base.models.TestSummaryResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TestSummaryResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-16-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
            'testSummaryName': self._serialize.url("test_summary_name", test_summary_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('TestSummaryResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries/{testSummaryName}'}  # type: ignore
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport clientConfig as cConf\nfrom mmp_tracer_api import objID\nfrom comsol_api import MMPComsolDummy\nfrom mupif import *\nimport logging\nlogger = logging.getLogger()\n\nimport time as timeTime\nstart = timeTime.time()\nlogger.info('Timer started'... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport clientConfig as cConf\nfrom mmp_tracer_api import objID\nfrom comsol_api import MMPComsolDummy\nfrom mupif import *\nimport logging\nlogger = logging.getLogger()\n\nimport time as timeTime\nstart = timeTime.time()\nlogger.info... | ```python
import clientConfig as cConf
from mmp_tracer_api import objID
from comsol_api import MMPComsolDummy
from mupif import *
import logging
logger = logging.getLogger()
import time as timeTime
start = timeTime.time()
logger.info('Timer started')
# locate nameserver
ns = PyroUtil.connectNameServer(
nshost=cConf.nshost, nsport=cConf.nsport, hkey=cConf.hkey)
# localize JobManager running on (remote) server and create a tunnel to it
# allocate the first application app1
try:
mieSolverAppRec = PyroUtil.allocateApplicationWithJobManager(
ns, cConf.mieSolverJobManRec, cConf.jobNatPorts.pop(0),
cConf.sshClient, cConf.options, cConf.sshHost)
tracerSolverAppRec = PyroUtil.allocateApplicationWithJobManager(
ns, cConf.tracerSolverJobManRec, cConf.jobNatPorts.pop(0),
cConf.sshClient, cConf.options, cConf.sshHost)
mieApp = mieSolverAppRec.getApplication()
tracerApp = tracerSolverAppRec.getApplication()
except Exception as e:
logger.exception(e)
else:
if((tracerApp is not None) and (mieApp is not None)):
logger.info("solvers are not None")
mieSolverSignature = mieApp.getApplicationSignature()
logger.info("Working mie solver on server " + mieSolverSignature)
tracerSolverSignature = tracerApp.getApplicationSignature()
logger.info("Working tracer solver on server " + tracerSolverSignature)
#mieApp = MMPMie('localhost')
#tracerApp = MMPRaytracer('localhost')
comsolApp = MMPComsolDummy('localhost')
# Connect functions
pScat = mieApp.getProperty(PropertyID.PID_ScatteringCrossSections, 0,
objectID=objID.OBJ_PARTICLE_TYPE_1)
pPhase = mieApp.getProperty(PropertyID.PID_InverseCumulativeDist, 0,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(pScat)
tracerApp.setProperty(pPhase)
# Connect fields
fTemp = comsolApp.getField(FieldID.FID_Temperature, 0)
fHeat = comsolApp.getField(FieldID.FID_Thermal_absorption_volume, 0)
tracerApp.setField(fTemp)
tracerApp.setField(fHeat)
# Connect properties
# Emission spectrum
import numpy as np
a = {}
A = np.loadtxt('../../../mmp_tracer_api/data/EM_GREEN.dat')
a['wavelengths'] = A[:, 0]
a['intensities'] = A[:, 1]
em = Property.Property(value=a,
propID=PropertyID.PID_EmissionSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(em)
# Excitation spectrum
b = {}
B = np.loadtxt('../../../mmp_tracer_api/data/EX_GREEN.dat')
b['wavelengths'] = B[:, 0]
b['intensities'] = B[:, 1]
ex = Property.Property(value=b,
propID=PropertyID.PID_ExcitationSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(ex)
# Absorption spectrum
c = {}
C = np.loadtxt('../../../mmp_tracer_api/data/Abs_GREEN.dat')
c['wavelengths'] = C[:, 0]
c['intensities'] = C[:, 1]
aabs = Property.Property(value=c,
propID=PropertyID.PID_AsorptionSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(aabs)
# Particle density
vDens = 0.00000003400
pDens = Property.Property(value=vDens,
propID=PropertyID.PID_ParticleNumberDensity,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_CONE)
tracerApp.setProperty(pDens)
# Number of rays to trace
pRays = Property.Property(value=100,
propID=PropertyID.PID_NumberOfRays,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_CONE)
tracerApp.setProperty(pRays)
# Solve
mieApp.solveStep(0)
logger.debug("mieApp.isSolved=", mieApp.isSolved()) # True
tracerApp.solveStep(0, runInBackground=False)
comsolApp.solveStep(0)
# Plot data to file
logger.info("Saving vtk")
v = fTemp.field2VTKData()
v.tofile('testTemperature.vtk')
v = fHeat.field2VTKData()
v.tofile('testHeat.vtk')
else:
logger.debug("Connection to server failed, exiting")
finally:
logger.debug("terminating apps...")
if mieSolverAppRec:
mieSolverAppRec.terminateAll()
if tracerSolverAppRec:
tracerSolverAppRec.terminateAll()
``` |
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n\nimport wx\n\nfrom ..common.path import resource_filename\nfrom .translator import Translator\nfrom ..common.i18n import N_\n\n\nclass BaseView(Translator):\n \"\"\"Base class for all views.\n\n This class come with helper functions ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport wx\n\nfrom ..common.path import resource_filename\nfrom .translator import Translator\nfrom ..common.i18n import N_\n\n\nclass BaseView(Translator):\n \"\"\"Base class for all views.\n\n This class come with h... | ```python
# -*- coding: utf-8 -*-
import wx
from ..common.path import resource_filename
from .translator import Translator
from ..common.i18n import N_
class BaseView(Translator):
"""Base class for all views.
This class come with helper functions to configure the view.
Class Attributes:
LIGHT_GRAY (wx.Colour): Predefined background color.
Attributes:
window (wx.Window): the window element the view is in charge.
"""
LIGHT_GRAY = wx.Colour(0xf2, 0xf2, 0xf2)
def __init__(self, window):
Translator.__init__(self)
# wx.Window instance.
self.window = window
def set_frame_title(self, title):
"""Set the title of the wx.Frame containing this Window.
Args:
title (str): new frame title. The title will be translated.
"""
frame = self.window.GetTopLevelParent()
self.register_i18n(frame, frame.SetTitle, title)
def make_sizer(self, direction, items, outside_border=True, flag=0,
proportion=0, sizer=None, border=15):
"""Recursively make sizers with border for simple cases.
Each element given will be added to the sizer, with appropriate
borders. Border between elements (even sub-sizer) will be merged.
Args:
direction: the direction of the first sizer. Can be wx.HORIZONTAL
or wx.VERTICAL.
items (list of wx.Window): a list of all elements to add to the
sizer. If an item is None, a stretchable spacer is added. If
it's another list, this function is called recursively with the
opposite direction.
outside_border (boolean, optional): If set to False, no outside
border are added: Only borders between elements will be
created.
flag (optional): if set, additional flags who will be passed to
each ``sizer.Add()`` call.
proportion (optional): If set, the parameter will be passed to each
``sizer.Add()`` call.
sizer (wx.Sizer, optional): If set, this empty sizer will be used,
instead of creating a new one.
border (integer, optional): size of the border to use
returns:
wx.Sizer: the top-level sizer created.
"""
swap_direction = {
wx.VERTICAL: wx.HORIZONTAL,
wx.HORIZONTAL: wx.VERTICAL
}
if not sizer:
sizer = wx.BoxSizer(direction)
# the first border is implemented as a Spacer,
# because borders of hidden elements don't appears.
if outside_border:
sizer.AddSpacer(border)
for (index, item) in enumerate(items):
if item is None:
sizer.AddStretchSpacer()
continue
flags = 0
if isinstance(item, list):
item = self.make_sizer(swap_direction[direction], item,
outside_border=False)
if isinstance(item, wx.Sizer):
flags |= wx.EXPAND
# Compute flag for merging common border.
if outside_border:
if direction is wx.VERTICAL:
flags |= wx.LEFT | wx.RIGHT
else:
flags |= wx.TOP | wx.BOTTOM
if len(items) - 1 is not index:
if direction is wx.VERTICAL:
flags |= wx.BOTTOM
else:
flags |= wx.RIGHT
flags |= flag
sizer.Add(item, border=border, flag=flags, proportion=proportion)
# last border
if outside_border:
sizer.AddSpacer(border)
return sizer
def create_settings_button_box(self, parent):
"""Create a common box with 3 buttons: ok, cancel, apply"""
btn_ok = wx.Button(parent, wx.ID_OK, name='btn_ok')
btn_cancel = wx.Button(parent, wx.ID_CANCEL, name='btn_cancel')
btn_apply = wx.Button(parent, wx.ID_APPLY, name='btn_apply')
self.register_many_i18n('SetLabel', {
btn_cancel: N_('Cancel'),
btn_ok: N_('OK'),
btn_apply: N_('Apply')
})
# Buttons box
button_box = wx.StdDialogButtonSizer()
button_box.SetAffirmativeButton(btn_ok)
button_box.SetCancelButton(btn_cancel)
button_box.AddButton(btn_apply)
# Layout the button box
button_box.Realize()
return button_box
def set_icon(self):
"""Set the standard Bajoo favicon to the window.
Note that the window must be an instance of wx.Frame.
"""
icon_path = resource_filename('assets/window_icon.png')
icon = wx.Icon(icon_path)
self.window.SetIcon(icon)
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport six\nfrom funcy import select_keys, cached_property, once, once_per, monkey, wraps\nfrom funcy.py2 import mapcat, map\nfrom .cross import pickle, md5\n\nimport django\nfro... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport sys\nimport json\nimport six\nfrom funcy import select_keys, cached_property, once, once_per, monkey, wraps\nfrom funcy.py2 import mapcat, map\nfrom .cross import pickle, md5\n\nim... | ```python
# -*- coding: utf-8 -*-
import sys
import json
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
try:
from django.db.models.query import MAX_GET_RESULTS
except ImportError:
MAX_GET_RESULTS = None
from .conf import model_profile, redis_client, handle_connection_failure, LRU, ALL_OPS
from .utils import monkey_mix, get_model_name, stamp_fields, load_script, \
func_cache_key, cached_view_fab, get_thread_id, family_has_profile
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout):
"""
Writes data to cache and creates appropriate invalidators.
"""
load_script('cache_thing', LRU)(
keys=[cache_key],
args=[
pickle.dumps(data, -1),
json.dumps(cond_dnfs, default=str),
timeout
]
)
def cached_as(*samples, **kwargs):
"""
Caches results of a function and invalidates them same way as given queryset.
NOTE: Ignores queryset cached ops settings, just caches.
"""
timeout = kwargs.get('timeout')
extra = kwargs.get('extra')
key_func = kwargs.get('key_func', func_cache_key)
# If we unexpectedly get list instead of queryset return identity decorator.
# Paginator could do this when page.object_list is empty.
# TODO: think of better way doing this.
if len(samples) == 1 and isinstance(samples[0], list):
return lambda func: func
def _get_queryset(sample):
if isinstance(sample, Model):
queryset = sample.__class__.objects.inplace().filter(pk=sample.pk)
elif isinstance(sample, type) and issubclass(sample, Model):
queryset = sample.objects.all()
else:
queryset = sample
queryset._require_cacheprofile()
return queryset
querysets = map(_get_queryset, samples)
cond_dnfs = mapcat(dnfs, querysets)
key_extra = [qs._cache_key() for qs in querysets]
key_extra.append(extra)
if not timeout:
timeout = min(qs._cacheconf['timeout'] for qs in querysets)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
cache_key = 'as:' + key_func(func, args, kwargs, key_extra)
cache_data = redis_client.get(cache_key)
if cache_data is not None:
return pickle.loads(cache_data)
result = func(*args, **kwargs)
cache_thing(cache_key, result, cond_dnfs, timeout)
return result
return wrapper
return decorator
def cached_view_as(*samples, **kwargs):
return cached_view_fab(cached_as)(*samples, **kwargs)
class QuerySetMixin(object):
@cached_property
def _cacheprofile(self):
profile = model_profile(self.model)
if profile:
self._cacheconf = profile.copy()
self._cacheconf['write_only'] = False
return profile
@cached_property
def _cloning(self):
return 1000
def _require_cacheprofile(self):
if self._cacheprofile is None:
raise ImproperlyConfigured(
'Cacheops is not enabled for %s.%s model.\n'
'If you don\'t want to cache anything by default '
'you can configure it with empty ops.'
% (self.model._meta.app_label, get_model_name(self.model)))
def _cache_key(self, extra=''):
"""
Compute a cache key for this queryset
"""
md = md5()
md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
# Vary cache key for proxy models
md.update('%s.%s' % (self.model.__module__, self.model.__name__))
# Protect from field list changes in model
md.update(stamp_fields(self.model))
# Use query SQL as part of a key
try:
sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql()
try:
sql_str = sql % params
except UnicodeDecodeError:
sql_str = sql % map(smart_str, params)
md.update(smart_str(sql_str))
except EmptyResultSet:
pass
# If query results differ depending on database
if self._cacheprofile and not self._cacheprofile['db_agnostic']:
md.update(self.db)
if extra:
md.update(str(extra))
# Thing only appeared in Django 1.8 and was renamed in Django 1.9
it_class = getattr(self, '_iterator_class', None) or getattr(self, '_iterable_class', None)
if it_class:
md.update('%s.%s' % (it_class.__module__, it_class.__name__))
# 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
if hasattr(self, 'flat'):
md.update(str(self.flat))
return 'q:%s' % md.hexdigest()
def _cache_results(self, cache_key, results):
cond_dnfs = dnfs(self)
cache_thing(cache_key, results, cond_dnfs, self._cacheconf['timeout'])
def cache(self, ops=None, timeout=None, write_only=None):
"""
Enables caching for given ops
ops - a subset of {'get', 'fetch', 'count', 'exists'},
ops caching to be turned on, all enabled by default
timeout - override default cache timeout
write_only - don't try fetching from cache, still write result there
NOTE: you actually can disable caching by omiting corresponding ops,
.cache(ops=[]) disables caching for this queryset.
"""
self._require_cacheprofile()
if ops is None or ops == 'all':
ops = ALL_OPS
if isinstance(ops, str):
ops = [ops]
self._cacheconf['ops'] = set(ops)
if timeout is not None:
self._cacheconf['timeout'] = timeout
if write_only is not None:
self._cacheconf['write_only'] = write_only
return self
def nocache(self):
"""
Convinience method, turns off caching for this queryset
"""
# cache profile not present means caching is not enabled for this model
if self._cacheprofile is None:
return self
else:
return self.cache(ops=[])
def cloning(self, cloning=1000):
self._cloning = cloning
return self
def inplace(self):
return self.cloning(0)
if django.VERSION >= (1, 9):
def _clone(self, **kwargs):
if self._cloning:
return self.clone(**kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
else:
def _clone(self, klass=None, setup=False, **kwargs):
if self._cloning:
return self.clone(klass, setup, **kwargs)
elif klass is not None:
# HACK: monkey patch self.query.clone for single call
# to return itself instead of cloning
original_query_clone = self.query.clone
def query_clone():
self.query.clone = original_query_clone
return self.query
self.query.clone = query_clone
return self.clone(klass, setup, **kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, klass=None, setup=False, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, klass, setup, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
def iterator(self):
# TODO: do not cache empty queries in Django 1.6
superiter = self._no_monkey.iterator
cache_this = self._cacheprofile and 'fetch' in self._cacheconf['ops']
if cache_this:
cache_key = self._cache_key()
if not self._cacheconf['write_only'] and not self._for_write:
# Trying get data from cache
cache_data = redis_client.get(cache_key)
if cache_data is not None:
results = pickle.loads(cache_data)
for obj in results:
yield obj
raise StopIteration
# Cache miss - fallback to overriden implementation
results = []
for obj in superiter(self):
if cache_this:
results.append(obj)
yield obj
if cache_this:
self._cache_results(cache_key, results)
raise StopIteration
def count(self):
if self._cacheprofile and 'count' in self._cacheconf['ops']:
# Optmization borrowed from overriden method:
# if queryset cache is already filled just return its len
# NOTE: there is no self._iter in Django 1.6+, so we use getattr() for compatibility
if self._result_cache is not None and not getattr(self, '_iter', None):
return len(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.count(self))()
else:
return self._no_monkey.count(self)
def get(self, *args, **kwargs):
# .get() uses the same .iterator() method to fetch data,
# so here we add 'fetch' to ops
if self._cacheprofile and 'get' in self._cacheconf['ops']:
# NOTE: local_get=True enables caching of simple gets in local memory,
# which is very fast, but not invalidated.
# Don't bother with Q-objects, select_related and previous filters,
# simple gets - thats what we are really up to here.
if self._cacheprofile['local_get'] \
and not args \
and not self.query.select_related \
and not self.query.where.children:
# NOTE: We use simpler way to generate a cache key to cut costs.
# Some day it could produce same key for diffrent requests.
key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
try:
return _local_get_cache[key]
except KeyError:
_local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
return _local_get_cache[key]
except TypeError:
# If some arg is unhashable we can't save it to dict key,
# we just skip local cache in that case
pass
if 'fetch' in self._cacheconf['ops']:
qs = self
else:
qs = self._clone().cache()
else:
qs = self
return qs._no_monkey.get(qs, *args, **kwargs)
if django.VERSION >= (1, 6):
def exists(self):
if self._cacheprofile and 'exists' in self._cacheconf['ops']:
if self._result_cache is not None:
return bool(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.exists(self))()
else:
return self._no_monkey.exists(self)
if django.VERSION >= (1, 5):
def bulk_create(self, objs, batch_size=None):
objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
elif django.VERSION >= (1, 4):
def bulk_create(self, objs):
objs = self._no_monkey.bulk_create(self, objs)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
def connect_first(signal, receiver, sender):
old_receivers = signal.receivers
signal.receivers = []
signal.connect(receiver, sender=sender)
signal.receivers += old_receivers
# We need to stash old object before Model.save() to invalidate on its properties
_old_objs = {}
class ManagerMixin(object):
@once_per('cls')
def _install_cacheops(self, cls):
cls._cacheprofile = model_profile(cls)
if family_has_profile(cls):
# Set up signals
connect_first(pre_save, self._pre_save, sender=cls)
connect_first(post_save, self._post_save, sender=cls)
connect_first(post_delete, self._post_delete, sender=cls)
# Install auto-created models as their module attributes to make them picklable
module = sys.modules[cls.__module__]
if not hasattr(module, cls.__name__):
setattr(module, cls.__name__, cls)
def contribute_to_class(self, cls, name):
self._no_monkey.contribute_to_class(self, cls, name)
# Django 1.7+ migrations create lots of fake models, just skip them
# NOTE: we make it here rather then inside _install_cacheops()
# because we don't want @once_per() to hold refs to all of them.
if cls.__module__ != '__fake__':
self._install_cacheops(cls)
def _pre_save(self, sender, instance, **kwargs):
if instance.pk is not None:
try:
_old_objs[get_thread_id(), sender, instance.pk] = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass
def _post_save(self, sender, instance, **kwargs):
# Invoke invalidations for both old and new versions of saved object
old = _old_objs.pop((get_thread_id(), sender, instance.pk), None)
if old:
invalidate_obj(old)
invalidate_obj(instance)
# NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
# but its base having one. Or vice versa.
# We still need to invalidate in this case, but cache on save better be skipped.
if not instance._cacheprofile:
return
# Enabled cache_on_save makes us write saved object to cache.
# Later it can be retrieved with .get(<cache_on_save_field>=<value>)
# <cache_on_save_field> is pk unless specified.
# This sweet trick saves a db request and helps with slave lag.
cache_on_save = instance._cacheprofile.get('cache_on_save')
if cache_on_save:
# HACK: We get this object "from field" so it can contain
# some undesirable attributes or other objects attached.
# RelatedField accessors do that, for example.
#
# So we strip down any _*_cache attrs before saving
# and later reassign them
unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
for k in unwanted_dict:
del instance.__dict__[k]
key = 'pk' if cache_on_save is True else cache_on_save
# Django doesn't allow filters like related_id = 1337.
# So we just hacky strip _id from end of a key
# TODO: make it right, _meta.get_field() should help
filter_key = key[:-3] if key.endswith('_id') else key
cond = {filter_key: getattr(instance, key)}
qs = sender.objects.inplace().filter(**cond).order_by()
if MAX_GET_RESULTS:
qs = qs[:MAX_GET_RESULTS + 1]
qs._cache_results(qs._cache_key(), [instance])
# Reverting stripped attributes
instance.__dict__.update(unwanted_dict)
def _post_delete(self, sender, instance, **kwargs):
"""
Invalidation upon object deletion.
"""
# NOTE: this will behave wrong if someone changed object fields
# before deletion (why anyone will do that?)
invalidate_obj(instance)
# Django 1.5- compatability
if not hasattr(Manager, 'get_queryset'):
def get_queryset(self):
return self.get_query_set()
def inplace(self):
return self.get_queryset().inplace()
def cache(self, *args, **kwargs):
return self.get_queryset().cache(*args, **kwargs)
def nocache(self):
return self.get_queryset().nocache()
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
**kwargs):
"""
Invoke invalidation on m2m changes.
"""
# Skip this machinery for explicit through tables,
# since post_save and post_delete events are triggered for them
if not sender._meta.auto_created:
return
if action not in ('pre_clear', 'post_add', 'pre_remove'):
return
m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
if m2m.rel.through == sender)
# TODO: optimize several invalidate_objs/dicts at once
if action == 'pre_clear':
# TODO: always use column names here once Django 1.3 is dropped
instance_field = m2m.m2m_reverse_field_name() if reverse else m2m.m2m_field_name()
objects = sender.objects.filter(**{instance_field: instance.pk})
for obj in objects:
invalidate_obj(obj)
elif action in ('post_add', 'pre_remove'):
instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
if reverse:
instance_column, model_column = model_column, instance_column
# NOTE: we don't need to query through objects here,
# cause we already know all their meaningfull attributes.
for pk in pk_set:
invalidate_dict(sender, {
instance_column: instance.pk,
model_column: pk
})
@once
def install_cacheops():
"""
Installs cacheops by numerous monkey patches
"""
monkey_mix(Manager, ManagerMixin)
monkey_mix(QuerySet, QuerySetMixin)
QuerySet._cacheprofile = QuerySetMixin._cacheprofile
QuerySet._cloning = QuerySetMixin._cloning
# DateQuerySet existed in Django 1.7 and earlier
# Values*QuerySet existed in Django 1.8 and earlier
from django.db.models import query
for cls_name in ('ValuesQuerySet', 'ValuesListQuerySet', 'DateQuerySet'):
if hasattr(query, cls_name):
cls = getattr(query, cls_name)
monkey_mix(cls, QuerySetMixin, ['iterator'])
try:
# Use app registry in Django 1.7
from django.apps import apps
admin_used = apps.is_installed('django.contrib.admin')
get_models = apps.get_models
except ImportError:
# Introspect INSTALLED_APPS in older djangos
from django.conf import settings
admin_used = 'django.contrib.admin' in settings.INSTALLED_APPS
from django.db.models import get_models
# Install profile and signal handlers for any earlier created models
for model in get_models(include_auto_created=True):
model._default_manager._install_cacheops(model)
# Turn off caching in admin
if admin_used:
from django.contrib.admin.options import ModelAdmin
# Renamed queryset to get_queryset in Django 1.6
method_name = 'get_queryset' if hasattr(ModelAdmin, 'get_queryset') else 'queryset'
@monkey(ModelAdmin, name=method_name)
def get_queryset(self, request):
return get_queryset.original(self, request).nocache()
# Bind m2m changed handler
m2m_changed.connect(invalidate_m2m)
# Make buffers/memoryviews pickleable to serialize binary field data
if six.PY2:
import copy_reg
copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))
if six.PY3:
import copyreg
copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
``` |
[
{
"content": "```python\nfrom unittest.mock import patch, Mock\n\nimport pytest\nimport yaml\n\nfrom asphalt.core import command\n\nDerivedApplication = None\n\n\ndef test_quickstart_application(monkeypatch, tmpdir, capsys):\n def mock_input(text):\n if text == 'Project name: ':\n return 'E... | [
{
"content": "<|memory_start|>```python\nfrom unittest.mock import patch, Mock\n\nimport pytest\nimport yaml\n\nfrom asphalt.core import command\n\nDerivedApplication = None\n\n\ndef test_quickstart_application(monkeypatch, tmpdir, capsys):\n def mock_input(text):\n if text == 'Project name: ':\n ... | ```python
from unittest.mock import patch, Mock
import pytest
import yaml
from asphalt.core import command
DerivedApplication = None
def test_quickstart_application(monkeypatch, tmpdir, capsys):
def mock_input(text):
if text == 'Project name: ':
return 'Example Project'
elif text == 'Top level package name: ':
return 'example'
raise ValueError('Unexpected input: ' + text)
get_distribution = Mock()
get_distribution('asphalt').parsed_version.public = '1.0.0'
monkeypatch.setattr('pkg_resources.get_distribution', get_distribution)
monkeypatch.setattr('builtins.input', mock_input)
tmpdir.chdir()
command.quickstart_application()
# Check that the project directory and the top level package were created
projectdir = tmpdir.join('Example Project')
assert projectdir.check(dir=True)
assert projectdir.join('example').join('__init__.py').check(file=1)
# Check that example/application.py was properly generated
with projectdir.join('example').join('application.py').open() as f:
assert f.read() == """\
from asphalt.core.application import Application
from asphalt.core.context import ApplicationContext
class ExampleProjectApplication(Application):
@coroutine
def start(app_ctx: ApplicationContext):
pass # IMPLEMENT CUSTOM LOGIC HERE
"""
with projectdir.join('config.yml').open() as f:
config_data = f.read()
assert isinstance(yaml.load(config_data), dict)
assert config_data == """\
---
application: example:ExampleProjectApplication
components:
foo: {} # REPLACE ME
settings:
bar: 1 # REPLACE ME
logging:
version: 1
disable_existing_loggers: false
handlers:
console:
class: logging.StreamHandler
formatter: generic
formatters:
generic:
format: "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
root:
handlers: [console]
level: INFO
"""
# Check that setup.py was properly generated
with projectdir.join('setup.py').open() as f:
assert f.read() == """\
from setuptools import setup
setup(
name='example',
version='1.0.0',
description='Example Project',
long_description='FILL IN HERE',
author='FILL IN HERE',
author_email='FILL IN HERE',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3'
],
zip_safe=True,
packages=[
'example'
],
install_requires=[
'asphalt >= 1.0.0, < 2.0.0'
]
)
"""
# Check that another run will raise an error because the directory exists already
pytest.raises(SystemExit, command.quickstart_application)
out, err = capsys.readouterr()
assert err == 'Error: the directory "Example Project" already exists.\n'
@pytest.mark.parametrize('unsafe', [False, True], ids=['safe', 'unsafe'])
def test_run_from_config_file(tmpdir, unsafe):
if unsafe:
app_class = '!!python/name:{}.DerivedApplication'.format(__spec__.name)
else:
app_class = '{}:DerivedApplication'.format(__spec__.name)
with patch('{}.DerivedApplication'.format(__spec__.name)) as cls:
path = tmpdir.join('test.yaml')
path.write("""\
---
application: {}
components:
foo: {{}}
bar: {{}}
settings:
setting: blah
logging:
version: 1
disable_existing_loggers: false
""".format(app_class))
command.run_from_config_file(str(path), unsafe)
components = {'foo': {}, 'bar': {}}
logging = {'version': 1, 'disable_existing_loggers': False}
settings = {'setting': 'blah'}
cls.assert_called_once_with(components=components, logging=logging, settings=settings)
cls().run.assert_called_once_with()
@pytest.mark.parametrize('args, exits', [
(['asphalt', '--help'], True),
(['asphalt'], False)
], ids=['help', 'noargs'])
def test_main_help(capsys, args, exits):
with patch('sys.argv', args):
pytest.raises(SystemExit, command.main) if exits else command.main()
out, err = capsys.readouterr()
assert out.startswith('usage: asphalt [-h]')
def test_main_run():
args = ['/bogus/path', '--unsafe']
patch1 = patch('sys.argv', ['asphalt', 'run'] + args)
patch2 = patch.object(command, 'run_from_config_file')
with patch1, patch2 as run_from_config_file:
command.main()
assert run_from_config_file.called_once_with(args)
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#-*- coding: utf-8 -*-\r\n'''\r\nCreated on 2016. 11. 19\r\nUpdated on 2016. 01. 09\r\n\r\n'''\r\nfrom __future__ import print_function\r\nimport os\r\nimport re\r\nfrom utils import PrettyStringBuilder\r\nfrom utils import Progres... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#-*- coding: utf-8 -*-\r\n'''\r\nCreated on 2016. 11. 19\r\nUpdated on 2016. 01. 09\r\n\r\n'''\r\nfrom __future__ import print_function\r\nimport os\r\nimport re\r\nfrom utils import PrettyStringBuilder\r\nfrom util... | ```python
#-*- coding: utf-8 -*-
'''
Created on 2016. 11. 19
Updated on 2016. 01. 09
'''
from __future__ import print_function
import os
import re
from utils import PrettyStringBuilder
from utils import Progress
import javalang
class Resource(object):
Stopwords = None
EngDictionary = None
@staticmethod
def init():
if Resource.Stopwords is None:
Resource.Stopwords = Resource.load_base(u'stopwords')
if Resource.EngDictionary is None:
Resource.EngDictionary = Resource.load_base(u'en.dict')
@staticmethod
def load_base(_filename):
listDic = {}
f = open(_filename, 'r')
while True:
word = f.readline()
if word is None or len(word)==0: break
if len(word) <= 2: continue
word = word[:-2]
listDic[word] = 1
return listDic
class MethodWorker(object):
    """Per-version Java source analyzer.

    For every ``.java`` file of a project version it counts how many
    methods/constructors exist, how many carry a non-empty Javadoc
    description, and how many have a name whose camel-case tokens appear in
    the English dictionary. Results are cached as a text file under
    ``basepath`` and never recomputed if the cache file exists.
    """
    __name__ = u'MethodWithComments'
    basepath = u'/var/experiments/BugLocalization/dist/features/'

    def run(self, _group, _project, _versionName, _srcBase):
        """Analyze all sources under ``_srcBase`` and store the counts at
        <basepath>/<group>/<project>/sources/_methods/<version>.txt.
        Returns immediately if the cache file already exists.
        """
        print(u'preparing resources...', end=u'')
        Resource.init()
        print(u'Done')

        workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
        filename = os.path.join(workingPath, u'%s.txt' % _versionName)
        if os.path.exists(workingPath) is False: os.makedirs(workingPath)
        if os.path.exists(filename) is True: return

        methods = {}
        files = self.listing_files(_srcBase)
        progress = Progress(u'Calculating method', 2, 10, True)
        progress.set_upperbound(len(files))
        progress.start()
        for fname in files:
            # Close each source file promptly (the original never closed them).
            with open(fname, 'r') as fd:
                text = fd.read()
            key = fname[len(_srcBase) + 1:]  # path relative to the source root
            try:
                ADT = javalang.parse.parse(text)
                cntConstructors, cntConstComments, cntConstInDic = self.count(ADT, javalang.tree.ConstructorDeclaration)
                cntMethods, cntComments, cntMethodInDic = self.count(ADT, javalang.tree.MethodDeclaration)
                methods[key] = {'methods': cntMethods + cntConstructors,
                                'withComments': cntComments + cntConstComments,
                                'InDicMethods': cntMethodInDic + cntConstInDic}
            except javalang.parser.JavaSyntaxError:
                methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods': 0, 'error': 'SyntaxError'}
            except javalang.tokenizer.LexerError:
                methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods': 0, 'error': 'LexerError'}
            except Exception:
                # javalang can raise other errors on malformed input; record and move on.
                methods[key] = {'methods': 0, 'withComments': 0, 'InDicMethods': 0, 'error': 'Exception'}
            progress.check()
        progress.done()
        self.storeData(filename, methods)

    def listing_files(self, _path):
        """Return full paths of every ``.java`` file found under ``_path``."""
        results = []
        for root, dirs, files in os.walk(_path):
            for fname in files:
                if fname.endswith('.java') is False: continue
                results.append(os.path.join(root, fname))
        return results

    def count(self, _ADT, _filter):
        """Count declarations of type ``_filter`` in a parsed tree.

        :return: (total declarations, declarations with a non-empty Javadoc
            description, distinct names whose tokens hit the dictionary).
        """
        cntMethods = 0
        cntComments = 0
        names = set([])
        for path, node in _ADT.filter(_filter):
            cntMethods += 1
            names.add(node.name)
            if node.documentation is None or len(node.documentation) == 0: continue
            doc = javalang.javadoc.parse(node.documentation)
            if doc.description is None or len(doc.description) == 0: continue
            cntComments += 1
        cntInDic = 0
        for name in names:
            tokens = self.splitCamel(name)
            tokens = self.removingStopwords(tokens)
            if self.checkingEngDic(tokens) > 0:
                cntInDic += 1
        return cntMethods, cntComments, cntInDic

    def splitCamel(self, token):
        """Split an identifier on camel-case boundaries into word tokens."""
        corpus = []
        token = re.sub(r'([A-Z]+)(in|to|for|at|with|on|off|over)([A-Z]+\w+)', r'\1 \2 \3', token)  # e.g. XMLtoTEXT -> XML to TEXT
        token = re.sub(r'([a-z0-9])([A-Z]\w+)', r'\1 \2', token)  # UpperCase after LowerCase
        for item in token.split(' '):
            item = item.strip()
            if item == '': continue
            if re.sub(r'[A-Z]+', '', item) != '':
                item = re.sub(r'([A-Z]+)([A-Z]+\w+)', r'\1 \2', item)  # ALLFiles -> ALL Files
                for item2 in item.split(' '):
                    # BUG FIX: the original tested `item` here, so empty
                    # fragments produced by the split were never skipped.
                    if item2.strip() == '': continue
                    corpus.append(item2)
            else:
                corpus.append(item)
        return corpus

    def removingStopwords(self, _tokens):
        """Drop stopwords and 1-2 character tokens; returns unique tokens."""
        newer = set([])
        for token in _tokens:
            if len(token) <= 2: continue
            if token.lower() in Resource.Stopwords: continue
            newer.add(token)
        return list(newer)

    def checkingEngDic(self, _tokens):
        """Count tokens present in the English dictionary, trying the token
        as-is, lower-cased, then Capitalized."""
        count = 0
        for token in _tokens:
            if token in Resource.EngDictionary:
                count += 1
                continue
            if token.lower() in Resource.EngDictionary:
                count += 1
                continue
            nword = token[0].upper() + token[1:].lower()
            if nword in Resource.EngDictionary:
                count += 1
        return count

    #####################################
    # managing cache
    #####################################
    def storeData(self, _filename, _data):
        """Serialize ``_data`` with PrettyStringBuilder and write it to ``_filename``."""
        pretty = PrettyStringBuilder(_indent_depth=1)
        text = pretty.toString(_data)
        with open(_filename, 'w') as f:
            f.write(text)

    def clear(self, _group, _project):
        """Delete the cached results directory for the given project (best-effort)."""
        workingPath = os.path.join(self.basepath, _group, _project, u'sources', u'_methods')
        try:
            shutil.rmtree(workingPath)
            print(u'Removed : %s' % workingPath)
        except Exception:
            print(u'No Path : %s' % workingPath)
###############################################################################################################
###############################################################################################################
###############################################################################################################
import shutil
from commons import Subjects
def clear():
    """Wipe the cached method counts of every configured project."""
    subjects = Subjects()
    worker = MethodWorker()
    for group in subjects.groups:
        for project in subjects.projects[group]:
            worker.clear(group, project)
def work():
    """Run the method-count extraction for every version of the selected groups."""
    subjects = Subjects()
    for group in ['JBoss', 'Wildfly']:
        for project in subjects.projects[group]:
            for versionName in subjects.bugs[project].keys():
                if versionName == 'all':
                    continue
                print(u'MethodWithComments for %s / %s / %s' % (group, project, versionName))
                source_path = subjects.getPath_source(group, project, versionName)
                MethodWorker().run(group, project, versionName, source_path)
if __name__ == "__main__":
    # clear() wipes the cached results; call it before work() to force a rebuild.
    work()
``` |
[
{
"content": "Here is a code snippet:\n```python\nimport toto\nimport zmq\nimport cPickle as pickle\nimport zlib\nimport logging\nfrom toto.exceptions import *\nfrom toto.workerconnection import WorkerConnection\nfrom threading import Thread\nfrom tornado.options import options\nfrom tornado.gen import Task\nfr... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nimport toto\nimport zmq\nimport cPickle as pickle\nimport zlib\nimport logging\nfrom toto.exceptions import *\nfrom toto.workerconnection import WorkerConnection\nfrom threading import Thread\nfrom tornado.options import options\nfrom tornado.gen... | ```python
import toto
import zmq
import cPickle as pickle
import zlib
import logging
from toto.exceptions import *
from toto.workerconnection import WorkerConnection
from threading import Thread
from tornado.options import options
from tornado.gen import Task
from collections import deque
from zmq.eventloop.ioloop import ZMQPoller, IOLoop, PeriodicCallback
from zmq.eventloop.zmqstream import ZMQStream
from time import time
from uuid import uuid4
from traceback import format_exc
from toto.options import safe_define
WORKER_SOCKET_CONNECT = 'CONNECT'
WORKER_SOCKET_DISCONNECT = 'DISCONNECT'
class ZMQWorkerConnection(WorkerConnection):
    '''Use a ``WorkerConnection`` to make RPCs to the remote worker service(s) or worker/router specified by ``address``.
    ``address`` may be either an enumerable of address strings or a string of comma separated addresses. RPC retries
    and timeouts will happen by at most every ``abs(timeout)`` seconds when a periodic callback runs through all active
    messages and checks for prolonged requests. This is also the default timeout for any new calls. ``timeout`` must not be
    ``0``.
    Optionally pass any object or module with ``compress`` and ``decompress`` methods as the ``compression`` parameter to
    compress messages. The module must implement the same algorithm used on the worker service. By default, messages are not
    compressed.
    Optionally pass any object or module with ``dumps`` and ``loads`` methods that convert an ``object`` to and from a
    ``str`` to replace the default ``cPickle`` serialization with a protocol of your choice.
    Use ``auto_retry`` to specify whether or not messages should be retried by default. Retrying messages can cause substantial
    congestion in your worker service. Use with caution.
    '''

    # NOTE(review): this class is Python 2 only -- see the ``cPickle`` import,
    # ``dict.itervalues`` in timeout_message below, and the ``await`` parameter
    # name in invoke() (a reserved word from Python 3.7 on).

    def __init__(self, address, timeout=10.0, compression=None, serialization=None, auto_retry=False):
        # Accept no address, a comma-separated string, or any enumerable of addresses.
        if not address:
            self.active_connections = set()
        elif isinstance(address, str):
            self.active_connections = {i.strip() for i in address.split(',')}
        else:
            self.active_connections = set(address)
        # Caller threads PUSH work through this inproc socket; the background
        # I/O thread PULLs from it (see start()).
        self.message_address = 'inproc://WorkerConnection%s' % id(self)
        self.__context = zmq.Context()
        self.__queue_socket = self.__context.socket(zmq.PUSH)
        self.__queue_socket.bind(self.message_address)
        self.__thread = None
        self.__timeout = timeout
        # Bookkeeping keyed by message id: pending callbacks, in-flight
        # messages (with enqueue time), and per-message retry/timeout overrides.
        self.__callbacks = {}
        self.__queued_messages = {}
        self.__message_auto_retry = {}
        self.__message_timeouts = {}
        self.__ioloop = None
        self.__auto_retry = auto_retry
        # Pluggable serialization/compression; compression defaults to identity.
        self.loads = serialization and serialization.loads or pickle.loads
        self.dumps = serialization and serialization.dumps or pickle.dumps
        self.compress = compression and compression.compress or (lambda x: x)
        self.decompress = compression and compression.decompress or (lambda x: x)

    def invoke(self, method, parameters={}, callback=None, timeout=0, auto_retry=None, await=False):
        '''Invoke a ``method`` to be run on a remote worker process with the given ``parameters``. If specified, ``callback`` will be
        invoked with any response from the remote worker. By default the worker will timeout or retry based on the settings of the
        current ``WorkerConnection`` but ``timeout`` and ``auto_retry`` can be used for invocation specific behavior.
        Note: ``callback`` will be invoked with ``{'error': 'timeout'}`` on ``timeout`` if ``auto_retry`` is false. Invocations
        set to retry will never timeout and will instead be re-sent until a response is received. This behavior can be useful for
        critical operations but has the potential to cause substantial congestion in the worker system. Use with caution. Negative
        values of ``timeout`` will prevent messages from ever expiring or retrying regardless of ``auto_retry``. The default
        values of ``timeout`` and ``auto_retry`` cause a fallback to the values used to initialize ``WorkerConnection``.
        Passing ``await=True`` will wrap the call in a ``tornado.gen.Task`` allowing you to ``yield`` the response from the worker.
        The ``Task`` replaces ``callback`` so any user supplied callback will be ignored when ``await=True``.
        Alternatively, you can invoke methods with ``WorkerConnection.<module>.<method>(*args, **kwargs)``
        where ``"<module>.<method>"`` will be passed as the ``method`` argument to ``invoke()``.
        '''
        if await:
            return Task(lambda callback: self._queue_message(self.compress(self.dumps({'method': method, 'parameters': parameters})), callback, timeout, auto_retry))
        self._queue_message(self.compress(self.dumps({'method': method, 'parameters': parameters})), callback, timeout, auto_retry)

    def add_connection(self, address):
        '''Connect to the worker at ``address``. Worker invocations will be round robin load balanced between all connected workers.'''
        self._queue_message(address, command=WORKER_SOCKET_CONNECT)

    def remove_connection(self, address):
        '''Disconnect from the worker at ``address``. Worker invocations will be round robin load balanced between all connected workers.'''
        self._queue_message(address, command=WORKER_SOCKET_DISCONNECT)

    def set_connections(self, addresses):
        '''A convenience method to set the connected addresses. A connection will be made to any new address included in the ``addresses``
        enumerable and any currently connected address not included in ``addresses`` will be disconnected. If an address in ``addresses``
        is already connected, it will not be affected.
        '''
        addresses = set(addresses)
        to_remove = self.active_connections - addresses
        to_add = addresses - self.active_connections
        for a in to_remove:
            self.remove_connection(a)
        for a in to_add:
            self.add_connection(a)

    def __len__(self):
        # Number of messages currently awaiting a worker response.
        return len(self.__queued_messages)

    def _queue_message(self, message, callback=None, timeout=0, auto_retry=None, command=''):
        # Hand a message (or a CONNECT/DISCONNECT command) to the I/O thread,
        # lazily starting that thread on first use.
        if not self.__ioloop:
            self.start()
        message_id = str(uuid4())
        if callback:
            self.__callbacks[message_id] = callback
        # timeout == 0 means "use the connection default"; store only overrides.
        if timeout != 0:
            self.__message_timeouts[message_id] = timeout
        if auto_retry is not None:
            self.__message_auto_retry[message_id] = auto_retry
        self.__queue_socket.send_multipart((command, message_id, message))

    def log_error(self, error):
        logging.error(repr(error))

    def start(self):
        # Idempotent: spawn the background I/O thread that owns the IOLoop,
        # the DEALER socket to the workers and the inproc PULL socket.
        if self.__ioloop:
            return
        def loop():
            self.__ioloop = IOLoop()
            queue_socket = self.__context.socket(zmq.PULL)
            queue_socket.connect(self.message_address)
            queue_stream = ZMQStream(queue_socket, self.__ioloop)

            # Deliver a worker reply (or a synthesized timeout response) to the
            # registered callback and drop all bookkeeping for the message.
            def receive_response(message, response_override=None):
                self.__queued_messages.pop(message[1], None)
                self.__message_timeouts.pop(message[1], None)
                callback = self.__callbacks.pop(message[1], None)
                if callback:
                    try:
                        callback(response_override or self.loads(self.decompress(message[2])))
                    except Exception as e:
                        self.log_error(e)
                        callback({'error': e})

            # (Re)create the DEALER stream to all active workers; the close
            # callback recreates the stream so the connection self-heals.
            def create_worker_stream():
                def close_callback():
                    logging.info('Worker stream closed')
                    create_worker_stream()
                worker_socket = self.__context.socket(zmq.DEALER)
                for address in self.active_connections:
                    worker_socket.connect(address)
                worker_stream = ZMQStream(worker_socket, self.__ioloop)
                worker_stream.on_recv(receive_response)
                worker_stream.set_close_callback(close_callback)
                self._worker_stream = worker_stream
            create_worker_stream()

            # Forward a queued message to the workers, or apply a
            # CONNECT/DISCONNECT command (message[0] is the command slot).
            def queue_message(message):
                if message[0]:
                    if message[0] == WORKER_SOCKET_CONNECT and message[2] not in self.active_connections:
                        self.active_connections.add(message[2])
                        self._worker_stream.socket.connect(message[2])
                    elif message[0] == WORKER_SOCKET_DISCONNECT and message[2] in self.active_connections:
                        self.active_connections.remove(message[2])
                        self._worker_stream.socket.disconnect(message[2])
                    return
                # Record enqueue time so timeout_message can expire/retry it.
                self.__queued_messages[message[1]] = (time(), message)
                try:
                    self._worker_stream.send_multipart(message)
                except IOError as e:
                    self.log_error(e)
                    logging.info('Reconnecting')
                    create_worker_stream()
                except Exception as e:
                    self.log_error(e)
            queue_stream.on_recv(queue_message)

            # Periodic sweep: find in-flight messages older than their timeout
            # (negative timeouts never expire) and either requeue or fail them.
            def timeout_message():
                now = time()
                for message, retry in [(item[1], self.__message_auto_retry.get(item[1][1], self.__auto_retry)) for item, t in ((i, self.__message_timeouts.get(i[1][1], self.__timeout)) for i in self.__queued_messages.itervalues()) if t >= 0 and (item[0] + t < now)]:
                    if retry:
                        logging.info('Worker timeout, requeuing ' + message[1])
                        queue_message(message)
                    else:
                        receive_response(('', message[1]), {'error': 'timeout'})
            timeout_callback = PeriodicCallback(timeout_message, int(abs(self.__timeout * 1000.0)), io_loop = self.__ioloop)
            timeout_callback.start()
            self.__ioloop.start()
            self.__thread = None
        self.__thread = Thread(target=loop)
        self.__thread.daemon = True
        self.__thread.start()

    def stop(self):
        if self.__ioloop:
            self.__ioloop.stop()

    def join(self):
        if self.__thread:
            self.__thread.join()

    @classmethod
    def instance(cls):
        '''Returns the default instance of ``ZMQWorkerConnection`` as configured by the options prefixed
        with ``worker_``, instantiating it if necessary. Import the ``workerconnection`` module within
        your ``TotoService`` and run it with ``--help`` to see all available options.
        '''
        if not hasattr(cls, '_instance'):
            cls._instance = cls(options.worker_address, timeout=options.worker_timeout, compression=options.worker_compression_module and __import__(options.worker_compression_module), serialization=options.worker_serialization_module and __import__(options.worker_serialization_module), auto_retry=options.worker_auto_retry)
        return cls._instance
``` |
[
{
"content": "Here is a code file:\n```python\nfrom math import ceil\n\nfrom django.db import IntegrityError, connection, models\nfrom django.db.models.deletion import Collector\nfrom django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE\nfrom django.test import TestCase, skipIfDBFeature, skipUnlessDBFe... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nfrom math import ceil\n\nfrom django.db import IntegrityError, connection, models\nfrom django.db.models.deletion import Collector\nfrom django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE\nfrom django.test import TestCase, skipIfDBFeature... | ```python
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.deletion import Collector
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models import (
MR, A, Avatar, Base, Child, HiddenUser, HiddenUserProfile, M, M2MFrom,
M2MTo, MRNull, Parent, R, RChild, S, T, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Exercises each ``on_delete`` behavior (CASCADE, SET_NULL, SET_DEFAULT,
    SET(...), PROTECT, DO_NOTHING) declared on model ``A``'s foreign keys."""

    def setUp(self):
        # pk of the R row that SET_DEFAULT / SET(...) fields fall back to.
        self.DEFAULT = get_default_r()

    def test_auto(self):
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())

    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())

    def test_setvalue(self):
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)

    def test_setnull(self):
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)

    def test_setdefault(self):
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)

    def test_setdefault_none(self):
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)

    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())

    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())

    def test_protect(self):
        a = create_a('protect')
        msg = (
            "Cannot delete some instances of model 'R' because they are "
            "referenced through a protected foreign key: 'A.protect'"
        )
        with self.assertRaisesMessage(IntegrityError, msg):
            a.protect.delete()

    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()
        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)

    def test_do_nothing_qscount(self):
        """
        A models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)

    def test_inheritance_cascade_up(self):
        # Deleting the child row removes the parent (R) row of the MTI pair.
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())

    def test_inheritance_cascade_down(self):
        # Deleting the parent row cascades down to the child (RChild) row.
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())

    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())

    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())

    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_o2o_setnull(self):
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """General deletion behavior: m2m through-row cleanup, batching, signal
    ordering, constraint deferral, and the (count, per-model-dict) return
    value of ``delete()``."""

    def test_m2m(self):
        # Deleting either end of an explicit through model removes the through rows.
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())
        # Same for an auto-created through table.
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())
        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # NOTE(review): double negative == assertTrue(MRNull.objects.exists());
        # presumably the nullable-FK through row survives r.delete() -- confirm
        # against upstream before "simplifying" this line.
        self.assertFalse(not MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        # pre_delete receivers observe pk still set; after deletion pks are
        # None and SET_NULL related rows are nulled out.
        deleted = []
        related_setnull_sets = []
        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append([a.pk for a in obj.setnull_set.all()])
        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()
        a = create_a('update_cascade')
        a.cascade.delete()
        for obj in deleted:
            self.assertIsNone(obj.pk)
        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)
        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        pre_delete_order = []
        post_delete_order = []
        # NOTE(review): log_post_delete appends to pre_delete_order and
        # log_pre_delete appends to post_delete_order -- the names look
        # swapped. The assertions below match this wiring as written, so
        # verify against upstream before "fixing" the handlers.
        def log_post_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_pre_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)
        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )
        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        # While the S post_delete fires, its parent R row must still exist.
        deletions = []
        def log_post_delete(instance, **kwargs):
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)
        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)
        models.signals.post_delete.connect(log_post_delete, sender=S)
        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)
        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    def test_hidden_related(self):
        # related_name='+' relations are still collected and deleted.
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)
        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)

    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = ceil(len(objs) / batch_size)
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())

    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)
        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
        # TEST_SIZE / batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE / GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        expected_num_queries = ceil(TEST_SIZE / batch_size)
        expected_num_queries += ceil(TEST_SIZE / GET_ITERATOR_CHUNK_SIZE) + 2
        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())

    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())

    def test_delete_with_keeping_parents_relationships(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        parent_referent_id = S.objects.create(r=child.r_ptr).pk
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
        self.assertTrue(S.objects.filter(pk=parent_referent_id).exists())

    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)
        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_proxied_model_duplicate_queries(self):
        """
        #25685 - Deleting instances of a model with existing proxy
        classes should not issue multiple queries during cascade
        deletion of referring models.
        """
        avatar = Avatar.objects.create()
        # One query for the Avatar table and a second for the User one.
        with self.assertNumQueries(2):
            avatar.delete()
class FastDeleteTests(TestCase):
    """Verifies the fast-delete path (DELETE without fetching rows into
    memory) is taken where possible, via exact query counts."""

    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_revm2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete t, 1 to fast-delete t's m_set
        # NOTE(review): the comment mentions deleting ``t`` but the call below
        # deletes ``f`` -- confirm which side this test intends to exercise.
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_qs(self):
        u1 = User.objects.create()
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_instance_set_pk_none(self):
        u = User.objects.create()
        # User can be fast-deleted.
        collector = Collector(using='default')
        self.assertTrue(collector.can_fast_delete(u))
        u.delete()
        self.assertIsNone(u.pk)

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        # Backends that can't self-select in UPDATE/DELETE need an extra query.
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)

    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        #25932 - Fast deleting on backends that don't have the
        `no_update_can_self_select` feature should work even if the specified
        filter doesn't match any row.
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc='missing').delete(),
                (0, {'delete.User': 0})
            )
``` |
[
{
"content": "```python\nimport os\r\nimport subprocess\r\nfrom collections import defaultdict\r\nfrom datetime import datetime\r\nimport csv\r\nfrom csv import DictReader\r\nimport math\r\nfrom glob import glob\r\n\r\n# Data locations\r\nloc_train = \"../data/train.csv\"\r\nloc_test = \"../data/test.csv\"\r\nl... | [
{
"content": "<|memory_start|>```python\nimport os\r\nimport subprocess\r\nfrom collections import defaultdict\r\nfrom datetime import datetime\r\nimport csv\r\nfrom csv import DictReader\r\nimport math\r\nfrom glob import glob\r\n\r\n# Data locations\r\nloc_train = \"../data/train.csv\"\r\nloc_test = \"../data... | ```python
import os
import subprocess
from collections import defaultdict
from datetime import datetime
import csv
from csv import DictReader
import math
from glob import glob
# Data locations (paths are relative to the script's working directory)
loc_train = "../data/train.csv"
loc_test = "../data/test.csv"
loc_labels = "../data/trainLabels.csv"
loc_best = "test.pred2.csv" # best submission
loc_model_prefix = "../data/"
loc_preds_prefix = "../data/"

# Will be created
loc_test_vw = "../data/test.vw"  # Vowpal Wabbit formatted test set
loc_train_vw = "../data/train_y33.vw"  # VW train set labelled for target y33
loc_train_vw_temp = "../data/train_yn_temp.vw" # used for relabelling
loc_kaggle_submission = "test.pred.vw.csv"  # final submission file
def load_data(loc_csv, nrows=0):
    """Load integer label rows from a Kaggle-style CSV.

    The header row and the leading id column are dropped.

    :param loc_csv: path of the CSV file.
    :param nrows: if > 0, stop after this many data rows (the original
        accepted this parameter but ignored it; 0 keeps the old behavior).
    :return: list of lists of ints, one inner list per data row.
    """
    labels = []
    with open(loc_csv) as fd:
        reader = csv.reader(fd)
        # next() builtin instead of reader.next(): works on Python 2 and 3.
        next(reader)  # skip the header row
        for linenr, row in enumerate(reader):
            if nrows and linenr >= nrows:
                break
            # Convert only the label columns; row[0] is the id.
            labels.append([int(v) for v in row[1:]])
    return labels
def to_vw(loc_csv, loc_out, y, y_nr=33, train=True):
    """Convert a CSV dataset to Vowpal Wabbit input format.

    Each row becomes ``<label> '<id> |h... |b... |p... |f... |r...`` where the
    namespaces are: h = hashed/categorical, b = yes/no, p = positional,
    f = floats (``name:value``, or ``name_`` when the value is missing),
    r = the rest.

    Fixes applied:
    - ``k is not "id"`` / ``row[k] is not ""`` compared strings by identity,
      which only worked by CPython interning accident; now uses ``==``/``!=``.
    - Feature-group lists were rebuilt on every row; hoisted out of the loop
      as frozensets for O(1) membership.
    - Output was opened in binary mode while str was written (TypeError on
      Python 3); both files now use text mode, and the input file is closed
      deterministically via ``with``.
    - ``id`` no longer shadows the builtin.

    Args:
        loc_csv: input CSV with an ``id`` column.
        loc_out: destination VW file.
        y: label matrix (rows align with CSV rows); only used when train=True.
        y_nr: 1-based target column in ``y``.
        train: when False, a dummy label of 1 is written for every row.
    """
    # Loop-invariant feature groups (duplicates in the original float list
    # are harmless in a set).
    hash_features = frozenset([
        "x35", "x91", "x94", "x95", "x34", "x4", "x65", "x64", "x61", "x3"])
    yes_no_features = frozenset([
        "x92", "x93", "x101", "x103", "x130", "x102", "x10", "x11", "x12",
        "x13", "x14", "x25", "x24", "x26", "x32", "x33", "x30", "x31",
        "x141", "x140", "x142", "x45", "x44", "x43", "x42", "x41", "x2",
        "x1", "x55", "x56", "x57", "x129", "x128", "x127", "x126", "x105",
        "x63", "x62", "x87", "x86", "x85", "x116", "x117", "x115", "x104",
        "x74", "x75", "x72", "x73", "x71"])
    pos_features = frozenset([
        "x23", "x22", "x113", "x114", "x53", "x54", "x138", "x139"])
    float_features = frozenset([
        "x70", "x77", "x96", "x97", "x98", "x99", "x107", "x135", "x100",
        "x137", "x132", "x19", "x16", "x29", "x28", "x36", "x37", "x38",
        "x39", "x122", "x144", "x145", "x47", "x40", "x110", "x119", "x60",
        "x120", "x121", "x123", "x124", "x125", "x59", "x52", "x50", "x7",
        "x6", "x8", "x9"])
    print("\nConverting %s" % loc_csv)
    with open(loc_out, "w") as outfile, open(loc_csv) as infile:
        distribution = 0
        for linenr, row in enumerate(DictReader(infile)):
            n_h = n_b = n_p = n_f = n_r = ""
            for k in row:
                if k == "id":
                    continue
                if k in hash_features:
                    n_h += " %s_%s" % (k, row[k])
                elif k in yes_no_features:
                    n_b += " %s_%s" % (k, row[k])
                elif k in pos_features:
                    n_p += " %s_%s" % (k, row[k])
                elif k in float_features and row[k] != "":
                    n_f += " %s:%s" % (k, row[k])
                elif k in float_features:
                    # missing float value: emit a categorical marker instead
                    n_f += " %s_%s" % (k, row[k])
                else:
                    n_r += " %s_%s" % (k, row[k])
            if train:
                label = y[linenr][y_nr - 1]
                if label == 1:
                    distribution += 1
                else:
                    label = -1  # VW logistic loss expects -1/+1 labels
            else:
                label = 1  # dummy label for the test set
            row_id = row["id"]
            outfile.write("%s '%s |h%s |b%s |p%s |f%s |r%s\n"
                          % (label, row_id, n_h, n_b, n_p, n_f, n_r))
            if linenr % 100000 == 0:
                print("%s\t%s" % (linenr, distribution))
        print(distribution)
def relabel_vw(loc_vw, loc_out, loc_labels, y, y_i=0):
    """Rewrite an existing VW file with labels for a different target column.

    Reuses the feature part of each line (everything after the first token)
    and prepends a fresh -1/+1 label taken from ``y``.

    Fix: the output was opened in binary mode ("wb") while str was written,
    and the input was read as bytes then joined with a str separator — both
    TypeErrors on Python 3. Streams are now text-mode and the input handle
    is closed deterministically.

    Args:
        loc_vw: existing VW-format file whose features are reused.
        loc_out: destination VW file.
        loc_labels: unused; kept for backward compatibility with callers.
        y: per-row 0/1 label matrix.
        y_i: 1-based target column; note the default of 0 selects the LAST
            column via ``y[e][-1]``.
    """
    print("Relabelling to dataset %s..." % loc_out)
    start = datetime.now()
    with open(loc_out, "w") as outfile, open(loc_vw) as infile:
        for e, line in enumerate(infile):
            new_id = -1 if y[e][y_i - 1] == 0 else 1
            outfile.write("%s %s\n" % (new_id, " ".join(line.strip().split()[1:])))
    print("\ncompleted in :( %s\n" % (str(datetime.now() - start)))
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Fix: ``math.exp(-x)`` raises OverflowError for x below roughly -709.
    The negative branch is computed via ``exp(x)`` instead; both branches
    are algebraically identical, and extreme inputs now saturate cleanly
    at 0.0 / 1.0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)  # safe: x < 0 so exp(x) cannot overflow
    return z / (1 + z)
def to_kaggle(loc_preds, loc_best_sub, loc_out_sub, y_nr):
    """Splice VW predictions for one target (y<y_nr>) into a previous Kaggle
    submission, leaving every other row untouched.

    loc_preds: VW prediction file; each line is '<raw_score> <row_id>'.
    loc_best_sub: existing submission whose non-target rows are copied as-is.
    loc_out_sub: output submission path.
    y_nr: target number whose rows are replaced with fresh probabilities.
    """
    # Map row id -> probability (VW emits raw margins; squash with sigmoid).
    preds = {}
    for e, line in enumerate( open(loc_preds,"rb") ):
        preds[line.strip().split()[1]] = sigmoid(float(line.strip().split()[0]))
    with open(loc_out_sub,"wb") as outfile:
        for e, line in enumerate( open(loc_best_sub,"rb") ):
            row = line.strip().split(",")
            if e == 0:
                # header line: copied verbatim
                outfile.write(line)
            elif "y"+str(y_nr)+"," not in line:
                # row belongs to a different target: keep the old prediction
                outfile.write(line)
            else:
                # id column looks like '<row>_y<y_nr>'; strip the suffix to
                # look up the new probability for this row
                outfile.write("%s,%s\n"%(row[0],preds[row[0].replace("_y"+str(y_nr),"")]))
    print("Finished writing Kaggle submission: %s"%loc_out_sub)
    # NOTE(review): files are opened in binary mode while str values are
    # written/compared — this only works under Python 2; confirm the intended
    # runtime before porting.
if __name__ == "__main__":
    # One-shot pipeline driver; earlier stages are intentionally left
    # commented out and are meant to be re-enabled step by step.
    #Load labels, remove the id
    #y = load_data(loc_labels)

    #Create train set for label y33, and a test set with dummy labels
    #to_vw(loc_train, loc_train_vw, y, y_nr=33, train=True)
    #to_vw(loc_test, loc_test_vw, y, train=False)

    #Train and test VW now

    # NOTE(review): re-enabling the to_vw calls above requires also
    # uncommenting the load_data line, or 'y' will be undefined.
    #Add the VW predictions to our best submission file
    to_kaggle("preds_y33.p.txt", loc_best, loc_kaggle_submission, y_nr=33)
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# ~*~ coding: utf-8 ~*~\n\"\"\"Module that defines a Marshmallow field for Peewee's foreign keys.\"\"\"\n\nfrom marshmallow import fields\n\nfrom .mixin import FleakerFieldMixin\n\n\nclass ForeignKeyField(fields.... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# ~*~ coding: utf-8 ~*~\n\"\"\"Module that defines a Marshmallow field for Peewee's foreign keys.\"\"\"\n\nfrom marshmallow import fields\n\nfrom .mixin import FleakerFieldMixin\n\n\nclass Foreign... | ```python
# ~*~ coding: utf-8 ~*~
"""Module that defines a Marshmallow field for Peewee's foreign keys."""
from marshmallow import fields
from .mixin import FleakerFieldMixin
class ForeignKeyField(fields.Integer, FleakerFieldMixin):
    """Integer Marshmallow field that plays nicely with Peewee foreign keys.

    APIs conventionally expose a relation's column as ``${relation}_id``,
    while Peewee only accepts assignment through ``${relation}``. This field
    papers over that gap: on load the ``_id`` suffix is stripped from the
    target attribute, and on dump a related model instance collapses back to
    its integer primary key.

    The behaviour can be tuned through the schema context:

    - ``'convert_fks'``: set falsy to keep the original field name when
      deserializing — handy when the payload will be deserialized twice
      (e.g. by Webargs). The rename happens by default.
    """

    def _jsonschema_type_mapping(self):
        """Expose this field to JSON Schema as an integer-formatted number."""
        return {'type': 'number', 'format': 'integer'}

    def _add_to_schema(self, field_name, schema):
        """Bind the field and, unless disabled, retarget it without ``_id``.

        Args:
            field_name (str): Attribute name under which the parent schema
                holds this field.
            schema (marshmallow.Schema): Parent schema being constructed.
        """
        super(ForeignKeyField, self)._add_to_schema(field_name, schema)
        # The rename is opt-out via the 'convert_fks' context flag.
        if self.get_field_value('convert_fks', default=True):
            self.attribute = field_name.replace('_id', '')

    def _serialize(self, value, attr, obj):
        """Collapse a related model instance to its integer primary key."""
        # The relation may be optional, so only dereference a truthy value.
        value = value.id if value else value
        return super(ForeignKeyField, self)._serialize(value, attr, obj)
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom flask import current_app\n\nfrom changes.backends.base import UnrecoverableException\nfrom changes.config import db\nfrom changes.constants import Status, Result\nfrom changes.jobs.sync_job import sync_job\nfrom changes.models import Job, JobPlan\nfro... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom flask import current_app\n\nfrom changes.backends.base import UnrecoverableException\nfrom changes.config import db\nfrom changes.constants import Status, Result\nfrom changes.jobs.sync_job import sync_job\nfrom changes.models import J... | ```python
from flask import current_app
from changes.backends.base import UnrecoverableException
from changes.config import db
from changes.constants import Status, Result
from changes.jobs.sync_job import sync_job
from changes.models import Job, JobPlan
from changes.queue.task import tracked_task
def abort_create(task):
    """Failure callback for create_job: mark the job finished/aborted.

    Invoked by the tracked_task machinery (see the decorator on create_job)
    once retries are exhausted. The job id travels in task.kwargs.
    """
    job = Job.query.get(task.kwargs['job_id'])
    job.status = Status.finished
    job.result = Result.aborted
    # add-then-commit ordering matters: persist the terminal state before
    # logging, so the log reflects what was actually stored
    db.session.add(job)
    db.session.commit()
    current_app.logger.exception('Unrecoverable exception creating job %s', job.id)
@tracked_task(on_abort=abort_create, max_retries=10)
def create_job(job_id):
    """Execute a job's build plan, then schedule its sync task.

    Retried up to 10 times by tracked_task; after that abort_create marks
    the job aborted. Returns early (a no-op) when the job is missing,
    already finished, or has no build plan.
    """
    job = Job.query.get(job_id)
    if not job:
        return

    # we might already be marked as finished for various reasons
    # (such as aborting the task)
    if job.status == Status.finished:
        return

    jobplan, implementation = JobPlan.get_build_step_for_job(job_id=job.id)

    if implementation is None:
        # TODO(dcramer): record a FailureReason?
        job.status = Status.finished
        job.result = Result.failed
        current_app.logger.exception('No build plan set %s', job_id)
        return

    try:
        implementation.execute(job=job)
    except UnrecoverableException:
        job.status = Status.finished
        job.result = Result.aborted
        current_app.logger.exception('Unrecoverable exception creating %s', job_id)
        return

    # NOTE(review): failure paths mutate `job` but no db.session.add/commit is
    # visible here — presumably tracked_task commits the session; confirm.
    sync_job.delay(
        job_id=job.id.hex,
        task_id=job.id.hex,
        parent_task_id=job.build_id.hex,
    )
``` |
[
{
"content": "Here is a code file:\n```python\nimport numpy as np\n\nimport unicodecsv\nimport codecs\nimport goslate\nimport sqlite3\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\n\ndef imp_load(filename):\n\n texts = []\n books = []\... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport numpy as np\n\nimport unicodecsv\nimport codecs\nimport goslate\nimport sqlite3\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\n\ndef imp_load(filename):\n\n texts = []\... | ```python
import numpy as np
import unicodecsv
import codecs
import goslate
import sqlite3
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def imp_load(filename):
    """Parse a '$$$'-delimited bible dump into four parallel lists.

    Each record looks like ``Book.Chapter.Verse`` on its first line followed
    by the verse text. Records that lack a text line or whose reference has
    fewer than three dot-separated parts are silently skipped.

    Returns:
        (books, chapters, verses, texts) — parallel lists of stripped strings.
    """
    books = []
    chapters = []
    verses = []
    texts = []
    # Slurp the whole file as UTF-8, then cut it into verse records.
    with codecs.open(filename, encoding='utf-8') as handle:
        records = handle.read().split('$$$')
    for record in records:
        pieces = record.split('\n', 1)
        try:
            reference = pieces[0].strip()
            body = pieces[1].strip()
            ref_parts = reference.split('.')
            book = ref_parts[0].strip()
            chapter_num = ref_parts[1].strip()
            verse_num = ref_parts[2].strip()
        except IndexError:
            # Malformed record: no text line or incomplete reference.
            continue
        # All lookups succeeded — append to every list so they stay aligned.
        texts.append(body)
        books.append(book)
        chapters.append(chapter_num)
        verses.append(verse_num)
    return books, chapters, verses, texts
def calculate_similarity(texts, translations):
    """Build an NxN verse-similarity matrix from tf-idf cosine scores.

    texts: English (translated) verse strings, one per verse.
    translations: translation name of each verse, aligned with texts.
    Returns a symmetric numpy matrix with same-translation pairs zeroed.
    """
    # Train the tf-idf thingy on the translated texts
    tfidf = TfidfVectorizer().fit_transform(texts)
    # Build a matrix representation of the similarities between verses
    # This will yield a simmetrical matrix
    # TODO: For performance and logical reasons: Only calculate similarity for nearby verses, assume others 0 ?
    M = np.array([linear_kernel(tfidf[j:j+1], tfidf).flatten() for j in range(len(texts))])
    # Hack(ish): Set similarity with verses of same translation to 0
    # (prevents grouping a verse with another verse of its own translation)
    for i in range(len(M)):
        for j in range(i+1):
            if translations[i] == translations[j]:
                M[i][j] = M[j][i] = 0
    # print np.round(M*100,0)
    return M
def find_best_couple(M, t):
    """Locate the highest-similarity pair in matrix M.

    ``t`` (the translation(s) of each verse) is accepted for interface
    compatibility but not consulted: entries for verses of the same
    translation are assumed to already be zero in M.

    Returns:
        (row, col, value) of the matrix maximum.
    """
    flat_argmax = M.argmax()
    best_row, best_col = np.unravel_index(flat_argmax, M.shape)
    return best_row, best_col, M[best_row, best_col]
def merge_nodes(M, a, b):
    """Collapse verses ``a`` and ``b`` into a supernode appended to M.

    The supernode's similarity to every other verse is the mean of the two
    merged rows. Rows and columns ``a`` and ``b`` are zeroed rather than
    removed so that existing verse indices stay valid.

    Returns:
        A new (N+1)x(N+1) matrix; the supernode lives at index N.
    """
    size = len(M)
    # Mean similarity between the supernode and every existing node.
    merged_sim = [np.average([M[k][a], M[k][b]]) for k in range(size)]
    out = np.append(M, np.array(merged_sim)[None, :], axis=0)
    merged_sim.append(0.)  # the supernode's similarity with itself
    out = np.append(out, np.array(merged_sim)[:, None], axis=1)
    # Neutralize the merged nodes instead of deleting them, preserving indices.
    for idx in (a, b):
        out[:, idx] = 0
        out[idx, :] = 0
    return out
def group_verses(M, t, numT, P_min = 0.1):
    """
    Automatically group verses
    t = the translation of each verse
    numT = max number of verses in a group = number of translations
    P_min = stop grouping once the best remaining similarity drops below this

    Greedy agglomerative grouping: repeatedly merge the most similar pair
    (verse or supernode) until similarity falls under P_min or N iterations
    have run. Returns {supernode_index: [original verse indices]}.

    NOTE: Python 2 code (print statements below).
    """
    t = [[val] for val in t]
    N = len(M)
    groups = {} # keyed by supernode index
    iteration = 0
    max_iteration = N
    while iteration < max_iteration:
        iteration += 1
        #print "\t\tGrouping: iteration ",iteration
        i,j,P = find_best_couple(M, t)
        #print "\t\tbest couple: ",i,j,P
        # Stop iterating if similarity gets too low...
        if P < P_min:
            break;
        group = []
        # merge supernodes if they exist, else merge nodes:
        if i in groups:
            group.extend(groups[i])
        else:
            group.append(i)
        if j in groups:
            group.extend(groups[j])
        else:
            group.append(j)
        # group now contains all of the verses for the new supernode
        if len(group) > numT:
            # this grouping is invalid
            # prevent it from happening again by making P 0
            M[i][j] = 0
        else:
            # valid grouping. save it.
            # Remove the previous supernode groups
            if i in groups:
                del groups[i]
            if j in groups:
                del groups[j]
            # Create the supernode (appends a row/col; M grows each merge)
            M = merge_nodes(M,i,j)
            t.append(t[i] + t[j])
            # Save the index of the new supernode
            supernode_index = len(M)-1
            groups[supernode_index] = group
            print "\r\t\t",len(groups),
    print
    return groups
def align(input_translations, input_filenames, output_filename):
    """
    Load one csv file for each translation
    Group, align and sort the verses
    Export a csv file containing a column for each translation

    Raises ValueError when the two input lists differ in length.
    NOTE: Python 2 code (print statements, list.sort(cmp), dict.values()).
    """
    if len(input_translations) != len(input_filenames):
        raise ValueError("Number of translations and number of files must be the same")
    M = len(input_translations)

    # Load pre-translated data
    print "\tLoading data from files..."
    #translations,books,chapters,verses,texts_original,texts_en = load_translated_verses(input_translations, input_filenames)
    translations,chapters,verses,texts_original,texts_en = csv_import_translated_books(input_filenames, input_translations)

    # Calculate similarity between verses
    print "\tCalculating similarity matrix..."
    similarity = calculate_similarity(texts_en, translations)

    def canonical_group_cmp(a, b):
        """
        Define sort order for groups of verses
        """
        # find two verses from the same translation to compare their canonical order
        for i in a:
            for j in b:
                if translations[i] == translations[j]:
                    return i - j

    # Group the verses
    # NOTE(review): numT is hardcoded to 3 here — presumably it should be M
    # (the number of translations); confirm before using > 3 translations.
    print "\tGrouping verses..."
    groups = group_verses(similarity, translations, 3).values()
    # print groups

    # Put groups back into canonical order
    print "\tSorting verses..."
    groups.sort(canonical_group_cmp)

    # prepare data for csv export
    print "\tPreparing csv data..."
    csv_rows = []
    csv_rows.append(input_translations) # headers
    for group in groups:
        # create a row in the csv file for every group
        if len(group) == M:
            # rows where all translations are present, are quick:
            group.sort()
            row = [u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]) for verse in group]
        else:
            # for other rows, we have to find the missing translation, and substitute it with a blank
            row = []
            for translation in input_translations:
                found = False
                for verse in group:
                    if translation == translations[verse]:
                        # verse found for this translation
                        row.append(u"{0}:{1}:{2}".format(chapters[verse],verses[verse],texts_original[verse]))
                        found = True
                        break
                if not found:
                    # fill in a blank
                    row.append("")
        csv_rows.append(row)
    # print csv_rows

    # Export to csv file
    print "\tWriting csv file..."
    with open(output_filename,'wb') as f:
        cw = unicodecsv.writer(f, encoding='utf-8')
        cw.writerows(csv_rows)
    print "\tDone!"
def translate_csv(in_filename, language, out_filename):
    """
    Load a bible book from csv file
    translate it
    save it as a new file

    language: ISO code of the source language; when already 'en' the text is
    copied through untranslated. NOTE: Python 2 code (print statements).
    """
    # Create a translator object (generous retries/timeout for large batches)
    gs = goslate.Goslate(retry_times=100, timeout=100)

    # Load the bible book to be translated
    chapters,verses,texts_original = csv_import_book(in_filename)

    # Batch translate the verses if necessary
    if language != 'en':
        print "Batch translating {0} verses from '{1}' to 'en'".format(len(texts_original), language)
        texts_translated = gs.translate(texts_original, 'en', language)
    else:
        print "Not translating {0} verses already in 'en'".format(len(texts_original))
        texts_translated = texts_original

    # Write to CSV file: original and English text side by side
    rows = zip(chapters, verses, texts_original, texts_translated)
    with open(out_filename,'wb') as f:
        cw = unicodecsv.writer(f, encoding='utf-8')
        cw.writerow(['chapter','verse','text_original','text_english'])
        cw.writerows(rows)
def csv_import_book(filename):
    """Load one bible book from a csv file.

    The file must have a header row followed by ``chapter,verse,text`` rows.

    Fix: ``cr.next()`` is Python-2-only; replaced with the builtin
    ``next(cr)``, which works on both Python 2.6+ and Python 3.

    Returns:
        (chapters, verses, texts) — parallel lists of int, int, str.
    """
    texts = []
    chapters = []
    verses = []
    # Read in a whole file of verses
    with open(filename,'rb') as f:
        cr = unicodecsv.reader(f, encoding='utf-8')
        header = next(cr)  # skip header
        # Process verses
        for cnum,vnum,text in cr:
            chapters.append(int(cnum))  # parse integer
            verses.append(int(vnum))    # parse integer
            texts.append(text.strip())  # remove surrounding whitespace
    # return results
    return chapters,verses,texts
def csv_export_book(filename, rows=None, chapters=None, verses=None, texts=None):
    """Write a book to csv, either from pre-built ``rows`` or by zipping the
    (chapters, verses, texts) triple.

    Fix: the defaults were mutable lists (``rows=[]`` etc.) shared across
    calls — a classic Python pitfall; replaced with ``None`` sentinels.
    Passing an explicit empty list still behaves as before.
    """
    if not rows:
        rows = zip(chapters or [], verses or [], texts or [])
    with open(filename,'wb') as f:
        cw = unicodecsv.writer(f,encoding='utf-8')
        cw.writerow(['chapter','verse','text'])
        cw.writerows(rows)
def csv_import_translated_book(input_file):
    """
    import a single translated book from a single translation from single csv file

    Expects rows of ``chapter,verse,text_original,text_english`` after a
    header row (the format written by translate_csv).

    Fix: ``cr.next()`` is Python-2-only; replaced with the builtin
    ``next(cr)``, valid on both Python 2.6+ and Python 3.

    Returns:
        (chapters, verses, texts_original, texts_en) — parallel lists.
    """
    texts_en = []
    texts_original = []
    chapters = []
    verses = []
    # Read in a whole (Google translated) file of verses
    with open(input_file, 'rb') as f:
        cr = unicodecsv.reader(f, encoding='utf-8')
        header = next(cr)  # skip header
        # Process verses
        for cnum,vnum,text_original,text_en in cr:
            chapters.append(int(cnum))
            verses.append(int(vnum))
            texts_original.append(text_original.strip())
            texts_en.append(text_en.strip())
    # return results
    return chapters,verses,texts_original,texts_en
def csv_import_translated_books(input_files, input_translations):
    """Import one book from M translations, one csv file per translation.

    All per-verse lists are concatenated across files, and a parallel
    ``translations`` list records which translation each verse came from.

    Raises:
        ValueError: when the two input lists differ in length.
    """
    if len(input_files) != len(input_translations):
        raise ValueError("Number of input files and translations are not the same")
    translations = []
    chapters = []
    verses = []
    texts_original = []
    texts_en = []
    for file_name, translation_name in zip(input_files, input_translations):
        c, v, originals, english = csv_import_translated_book(file_name)
        chapters += c
        verses += v
        texts_original += originals
        texts_en += english
        # one translation tag per imported verse, keeping lists aligned
        translations += [translation_name] * len(english)
    return translations, chapters, verses, texts_original, texts_en
def csv_import_aligned_book(input_file):
    """
    Import a single aligned book (e.g. after it is checked by humans)

    The header row holds the translation names; every cell is formatted as
    ``chapter:verse:text`` (the format written by align()).

    Fixes: the cell was split with maxsplit=3, so any verse text containing
    a colon was silently truncated at its first internal colon — the
    reference format has exactly two structural colons, so maxsplit must
    be 2. Also replaced Python-2-only ``cr.next()`` with ``next(cr)``.

    Returns:
        list of dicts: one per row, mapping translation name to
        {'chapternum': int, 'versenum': int, 'text': str}.
    """
    groups = []
    with open(input_file, 'rb') as f:
        cr = unicodecsv.reader(f, encoding='utf-8')
        translations = next(cr)  # header contains translation names
        for row in cr:
            group = {}
            for i in range(len(translations)):
                verse = row[i].split(':', 2)  # chapter, verse, full text
                group[translations[i]] = {
                    'chapternum':int(verse[0]),
                    'versenum':int(verse[1]),
                    'text':verse[2].strip()
                }
            groups.append(group)
    return groups
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# coding=utf-8\n#\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# coding=utf-8\n#\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Lice... | ```python
# coding=utf-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tft.vocabulary and tft.compute_and_apply_vocabulary."""
import os
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform.beam import analyzer_impls
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.beam import tft_unit
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_metadata.proto.v0 import schema_pb2
mock = tf.compat.v1.test.mock
class VocabularyIntegrationTest(tft_unit.TransformTestCase):
def setUp(self):
    # Log which test is starting, then enter a Beam context that forces the
    # legacy tf.compat.v1 execution path for every test in this class.
    # NOTE(review): the context is entered manually but no matching __exit__
    # is visible in this chunk — presumably a tearDown elsewhere closes
    # self._context; confirm.
    tf.compat.v1.logging.info('Starting test case: %s', self._testMethodName)
    self._context = beam_impl.Context(force_tf_compat_v1=True)
    self._context.__enter__()
    super().setUp()
def _VocabFormat(self):
    # Vocabulary file format under test; presumably overridden by subclasses
    # to exercise other formats (e.g. 'tfrecord_gzip') — confirm.
    return 'text'
_WITH_LABEL_PARAMS = tft_unit.cross_named_parameters([
dict(
testcase_name='_string',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
expected_vocab_file_contents=[(b'goodbye', 1.9753224),
(b'aaaaa', 1.6600707),
(b'hello', 1.2450531)]),
dict(
testcase_name='_int64',
x_data=[3, 3, 3, 1, 2, 2, 1, 1, 2, 2, 1, 1],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[(b'1', 1.9753224), (b'2', 1.6600707),
(b'3', 1.2450531)]),
], [
dict(
testcase_name='with_label',
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
min_diff_from_avg=0.0,
store_frequency=True),
])
@tft_unit.named_parameters(*([
dict(
testcase_name='_unadjusted_mi_binary_label',
x_data=[
b'informative', b'informative', b'informative', b'uninformative',
b'uninformative', b'uninformative', b'uninformative',
b'uninformative_rare', b'uninformative_rare'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 0, 1, 1, 0, 0, 1],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[
(b'informative', 1.7548264),
(b'uninformative', 0.33985),
(b'uninformative_rare', 0.169925),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=False,
store_frequency=True),
dict(
testcase_name='_unadjusted_mi_multi_class_label',
x_data=[
b'good_predictor_of_0', b'good_predictor_of_0',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_2', b'good_predictor_of_2',
b'good_predictor_of_2', b'good_predictor_of_1',
b'good_predictor_of_1', b'weak_predictor_of_1',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_1', b'good_predictor_of_1',
b'weak_predictor_of_1'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[
(b'good_predictor_of_2', 6.9656613),
(b'good_predictor_of_1', 6.5969828),
(b'good_predictor_of_0', 6.339692),
(b'weak_predictor_of_1', 0.684463),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=False,
store_frequency=True),
dict(
testcase_name='_unadjusted_mi_binary_label_with_weights',
x_data=[
b'informative_1', b'informative_1', b'informative_0',
b'informative_0', b'uninformative', b'uninformative',
b'informative_by_weight', b'informative_by_weight'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 0, 0, 0, 1, 0, 1],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
# uninformative and informative_by_weight have the same co-occurrence
# relationship with the label but will have different importance
# values due to the weighting.
expected_vocab_file_contents=[
(b'informative_0', 3.1698803),
(b'informative_1', 1.1698843),
(b'informative_by_weight', 0.6096405),
(b'uninformative', 0.169925),
],
weight_data=[1, 1, 1, 1, 1, 1, 1, 5],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
min_diff_from_avg=0.0,
use_adjusted_mutual_info=False,
store_frequency=True),
dict(
testcase_name='_unadjusted_mi_binary_label_min_diff_from_avg',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
# All features are weak predictors, so all are adjusted to zero.
expected_vocab_file_contents=[
(b'hello', 0.0),
(b'goodbye', 0.0),
(b'aaaaa', 0.0),
],
use_adjusted_mutual_info=False,
min_diff_from_avg=2.0,
store_frequency=True),
dict(
testcase_name='_adjusted_mi_binary_label',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[
(b'goodbye', 1.4070794),
(b'aaaaa', 0.9987448),
(b'hello', 0.5017178),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=True,
store_frequency=True),
dict(
testcase_name='_adjusted_mi_binary_label_int64_feature',
x_data=[3, 3, 3, 1, 2, 2, 1, 1, 2, 2, 1, 1],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[
(b'1', 1.4070794),
(b'2', 0.9987448),
(b'3', 0.5017178),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=True,
store_frequency=True),
dict(
testcase_name='_adjusted_mi_multi_class_label',
x_data=[
b'good_predictor_of_0', b'good_predictor_of_0',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_2', b'good_predictor_of_2',
b'good_predictor_of_2', b'good_predictor_of_1',
b'good_predictor_of_1', b'weak_predictor_of_1',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_1', b'good_predictor_of_1',
b'weak_predictor_of_1'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[0, 0, 0, 1, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[
(b'good_predictor_of_1', 5.4800901),
(b'good_predictor_of_2', 5.3861019),
(b'good_predictor_of_0', 4.9054722),
(b'weak_predictor_of_1', -0.9748023),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=True,
store_frequency=True),
# TODO(b/128831096): Determine correct interaction between AMI and weights
dict(
testcase_name='_adjusted_mi_binary_label_with_weights',
x_data=[
b'informative_1', b'informative_1', b'informative_0',
b'informative_0', b'uninformative', b'uninformative',
b'informative_by_weight', b'informative_by_weight'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 0, 0, 0, 1, 0, 1],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
weight_data=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.0],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
# uninformative and informative_by_weight have the same co-occurrence
# relationship with the label but will have different importance
# values due to the weighting.
expected_vocab_file_contents=[
(b'informative_0', 2.3029856),
(b'informative_1', 0.3029896),
(b'informative_by_weight', 0.1713041),
(b'uninformative', -0.6969697),
],
min_diff_from_avg=0.0,
use_adjusted_mutual_info=True,
store_frequency=True),
dict(
testcase_name='_adjusted_mi_min_diff_from_avg',
x_data=[
b'good_predictor_of_0', b'good_predictor_of_0',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_0', b'good_predictor_of_1',
b'good_predictor_of_1', b'good_predictor_of_1',
b'good_predictor_of_1', b'good_predictor_of_0',
b'good_predictor_of_1', b'good_predictor_of_1',
b'good_predictor_of_1', b'weak_predictor_of_1',
b'weak_predictor_of_1'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
# With min_diff_from_avg, the small AMI value is regularized to 0
expected_vocab_file_contents=[
(b'good_predictor_of_0', 1.8322128),
(b'good_predictor_of_1', 1.7554416),
(b'weak_predictor_of_1', 0),
],
use_adjusted_mutual_info=True,
min_diff_from_avg=1.0,
store_frequency=True),
dict(
testcase_name='_labels_weight_and_frequency',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
weight_data=[
0.3, 0.4, 0.3, 1.2, 0.6, 0.7, 1.0, 1.0, 0.6, 0.7, 1.0, 1.0
],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
expected_vocab_file_contents=[
(b'aaaaa', 1.5637185),
(b'goodbye', 0.8699492),
(b'hello', 0.6014302),
],
min_diff_from_avg=0.0,
store_frequency=True),
# fingerprints by which each of the tokens will be sorted if fingerprint
# shuffling is used.
# 'ho ho': '1b3dd735ddff70d90f3b7ba5ebf65df521d6ca4d'
# 'world': '7c211433f02071597741e6ff5a8ea34789abbf43'
# 'hello': 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
# 'hi': 'c22b5f9178342609428d6f51b2c5af4c0bde6a42'
# '1': '356a192b7913b04c54574d18c28d46e6395428ab'
# '2': 'da4b9237bacccdf19c0760cab7aec4a8359010b0'
# '3': '77de68daecd823babbb58edb1c8e14d7106e83bb'
dict(
testcase_name='_string_feature_with_frequency_and_shuffle',
x_data=[b'world', b'hello', b'hello'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
expected_vocab_file_contents=[(b'world', 1), (b'hello', 2)],
fingerprint_shuffle=True,
store_frequency=True),
dict(
testcase_name='_string_feature_with_frequency_and_no_shuffle',
x_data=[b'hi', b'ho ho', b'ho ho'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
expected_vocab_file_contents=[(b'ho ho', 2), (b'hi', 1)],
store_frequency=True),
dict(
testcase_name='_string_feature_with_no_frequency_and_shuffle',
x_data=[b'world', b'hello', b'hello'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
expected_vocab_file_contents=[b'world', b'hello'],
fingerprint_shuffle=True),
dict(
testcase_name='_string_feature_with_no_frequency_and_no_shuffle',
x_data=[b'world', b'hello', b'hello'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
expected_vocab_file_contents=[b'hello', b'world']),
dict(
testcase_name='_int_feature_with_frequency_and_shuffle',
x_data=[1, 2, 2, 3],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[(b'1', 1), (b'3', 1), (b'2', 2)],
fingerprint_shuffle=True,
store_frequency=True),
dict(
testcase_name='_int_feature_with_frequency_and_no_shuffle',
x_data=[2, 1, 1, 1],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[(b'1', 3), (b'2', 1)],
store_frequency=True),
dict(
testcase_name='_int_feature_with_no_frequency_and_shuffle',
x_data=[1, 2, 2, 3],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[b'1', b'3', b'2'],
fingerprint_shuffle=True),
dict(
testcase_name='_int_feature_with_no_frequency_and_no_shuffle',
x_data=[1, 2, 2, 3],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[b'2', b'3', b'1']),
dict(
testcase_name='_int_feature_with_top_k',
x_data=[111, 2, 2, 3],
top_k=2,
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
expected_vocab_file_contents=[b'2', b'3']),
] + _WITH_LABEL_PARAMS))
def testVocabulary(self,
                   x_data,
                   x_feature_spec,
                   label_data=None,
                   label_feature_spec=None,
                   weight_data=None,
                   weight_feature_spec=None,
                   expected_vocab_file_contents=None,
                   **kwargs):
    """Test tft.Vocabulary with various inputs.

    Parameterized by the named_parameters decorator above: builds instance
    dicts for x (plus optional label/weight columns), runs tft.vocabulary
    for its side effect, and checks the produced vocab file contents.
    Extra **kwargs are forwarded to tft.vocabulary.
    """
    input_data = [{'x': x} for x in x_data]
    input_feature_spec = {'x': x_feature_spec}
    # Labels/weights are only wired in when the parameterization supplies
    # them; indices align rows of label_data/weight_data with input rows.
    if label_data is not None:
        for idx, label in enumerate(label_data):
            input_data[idx]['label'] = label
        input_feature_spec['label'] = label_feature_spec
    if weight_data is not None:
        for idx, weight in enumerate(weight_data):
            input_data[idx]['weights'] = weight
        input_feature_spec['weights'] = weight_feature_spec
    input_metadata = tft_unit.metadata_from_feature_spec(input_feature_spec)

    def preprocessing_fn(inputs):
        x = inputs['x']
        labels = inputs.get('label')
        weights = inputs.get('weights')
        # Note even though the return value is not used, calling tft.vocabulary
        # will generate the vocabulary as a side effect, and since we have named
        # this vocabulary it can be looked up using public APIs.
        tft.vocabulary(
            x,
            labels=labels,
            weights=weights,
            vocab_filename='my_vocab',
            file_format=self._VocabFormat(),
            **kwargs)
        return inputs

    self.assertAnalyzeAndTransformResults(
        input_data,
        input_metadata,
        preprocessing_fn,
        input_data,  # expected output data is same as input data
        input_metadata,  # expected output metadata is same as input metadata
        expected_vocab_file_contents={'my_vocab': expected_vocab_file_contents})
def testJointVocabularyForMultipleFeatures(self):
    """One shared vocabulary built over three string features, applied to two
    of them; checks the resulting indices, metadata domains and vocab file."""
    input_data = [{
        'a': 'hello',
        'b': 'world',
        'c': 'aaaaa'
    }, {
        'a': 'good',
        'b': '',
        'c': 'hello'
    }, {
        'a': 'goodbye',
        'b': 'hello',
        'c': '\n'
    }, {
        'a': ' ',
        'b': 'aaaaa',
        'c': 'bbbbb'
    }]
    input_metadata = tft_unit.metadata_from_feature_spec({
        'a': tf.io.FixedLenFeature([], tf.string),
        'b': tf.io.FixedLenFeature([], tf.string),
        'c': tf.io.FixedLenFeature([], tf.string)
    })
    vocab_filename = 'test_compute_and_apply_vocabulary'

    def preprocessing_fn(inputs):
        # A single vocabulary over the concatenation of all three features.
        deferred_vocab_and_filename = tft.vocabulary(
            tf.concat([inputs['a'], inputs['b'], inputs['c']], 0),
            vocab_filename=vocab_filename,
            file_format=self._VocabFormat())
        return {
            'index_a':
                tft.apply_vocabulary(
                    inputs['a'],
                    deferred_vocab_and_filename,
                    file_format=self._VocabFormat()),
            'index_b':
                tft.apply_vocabulary(
                    inputs['b'],
                    deferred_vocab_and_filename,
                    file_format=self._VocabFormat())
        }

    expected_vocab = [
        b'hello', b'aaaaa', b'world', b'goodbye', b'good', b'bbbbb', b' ',
        b'\n', b''
    ]
    empty_index = len(expected_vocab) - 1
    # For the 'text' format the last two tokens ('\n' and '') are dropped and
    # '' maps to the OOV index (-1) — presumably because a text vocab file
    # cannot represent newline/empty entries; confirm.
    if self._VocabFormat() == 'text':
        expected_vocab = expected_vocab[:-2]
        empty_index = -1
    max_index = len(expected_vocab) - 1
    expected_data = [
        # For tied frequencies, larger (lexicographic) items come first.
        {
            'index_a': 0,  # hello
            'index_b': 2  # world
        },
        {
            'index_a': 4,  # good
            'index_b': empty_index  # ''
        },
        {
            'index_a': 3,  # goodbye
            'index_b': 0  # hello
        },
        {
            'index_a': 6,  # ' '
            'index_b': 1  # aaaaa
        },
    ]
    expected_metadata = tft_unit.metadata_from_feature_spec(
        {
            'index_a': tf.io.FixedLenFeature([], tf.int64),
            'index_b': tf.io.FixedLenFeature([], tf.int64),
        }, {
            'index_a':
                schema_pb2.IntDomain(
                    min=-1, max=max_index, is_categorical=True),
            'index_b':
                schema_pb2.IntDomain(
                    min=-1, max=max_index, is_categorical=True),
        })
    self.assertAnalyzeAndTransformResults(
        input_data,
        input_metadata,
        preprocessing_fn,
        expected_data,
        expected_metadata,
        expected_vocab_file_contents={vocab_filename: expected_vocab})
_EMPTY_VOCABULARY_PARAMS = tft_unit.cross_named_parameters([
dict(
testcase_name='_string',
x_data=['a', 'b'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string)),
dict(
testcase_name='_int64',
x_data=[1, 2],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64)),
], [
dict(
testcase_name='empty_vocabulary',
index_data=[-1, -1],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=0, is_categorical=True),
frequency_threshold=5),
])
@tft_unit.named_parameters(*([
dict(
testcase_name='_string_feature_with_label_top_2',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[-1, -1, -1, 0, 1, 1, 0, 0, 0, 1, 1, 0],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=1, is_categorical=True),
top_k=2),
dict(
testcase_name='_string_feature_with_label_top_1',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[-1, -1, -1, 0, -1, -1, 0, 0, 0, -1, -1, 0],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=0, is_categorical=True),
top_k=1),
dict(
testcase_name='_int_feature_with_label_top_2',
x_data=[3, 3, 3, 1, 2, 2, 1, 1, 2, 2, 1, 1],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[-1, -1, -1, 0, 1, 1, 0, 0, 0, 1, 1, 0],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=1, is_categorical=True),
top_k=2),
dict(
testcase_name='_varlen_feature',
x_data=[[b'world', b'hello', b'hello'], [b'hello', b'world', b'foo'],
[], [b'hello']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[1, 0, 0], [0, 1, -99], [], [0]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_vector_feature',
x_data=[[b'world', b'hello', b'hello'], [b'hello', b'world', b'moo'],
[b'hello', b'hello', b'foo'], [b'world', b'foo', b'moo']],
x_feature_spec=tf.io.FixedLenFeature([3], tf.string),
index_data=[[1, 0, 0], [0, 1, -99], [0, 0, -99], [1, -99, -99]],
index_feature_spec=tf.io.FixedLenFeature([3], tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_varlen_feature_with_labels',
x_data=[[b'hello', b'world', b'bye', b'moo'],
[b'world', b'moo', b'foo'], [b'hello', b'foo', b'moo'],
[b'moo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
label_data=[1, 0, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[[0, -99, 1, -99], [-99, -99, -99], [0, -99, -99], [-99]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_vector_feature_with_labels',
x_data=[[b'world', b'hello', b'hi'], [b'hello', b'world', b'moo'],
[b'hello', b'bye', b'foo'], [b'world', b'foo', b'moo']],
x_feature_spec=tf.io.FixedLenFeature([3], tf.string),
label_data=[1, 0, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[[-99, -99, 1], [-99, -99, 0], [-99, -99, -99],
[-99, -99, 0]],
index_feature_spec=tf.io.FixedLenFeature([3], tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_varlen_integer_feature_with_labels',
x_data=[[0, 1, 3, 2], [1, 2, 4], [0, 4, 2], [2]],
x_feature_spec=tf.io.VarLenFeature(tf.int64),
label_data=[1, 0, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[[0, -99, 1, -99], [-99, -99, -99], [0, -99, -99], [-99]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_varlen_feature_with_some_empty_feature_values',
x_data=[[b'world', b'hello', b'hi', b'moo'], [],
[b'world', b'hello', b'foo'], []],
x_feature_spec=tf.io.VarLenFeature(tf.string),
label_data=[1, 0, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[[0, 1, -99, -99], [], [0, 1, -99], []],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_varlen_with_multiclass_labels',
x_data=[[1, 2, 3, 5], [1, 4, 5], [1, 2], [1, 2], [1, 3, 5], [1, 4, 3],
[1, 3]],
x_feature_spec=tf.io.VarLenFeature(tf.int64),
label_data=[1, 0, 1, 1, 4, 5, 4],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[[-1, 0, 2, 3], [-1, 1, 3], [-1, 0], [-1, 0], [-1, 2, 3],
[-1, 1, 2], [-1, 2]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=3, is_categorical=True),
top_k=4),
dict(
testcase_name='_labels_and_weights',
x_data=[
b'hello', b'hello', b'hello', b'goodbye', b'aaaaa', b'aaaaa',
b'goodbye', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
weight_data=[
0.3, 0.4, 0.3, 1.2, 0.6, 0.7, 1.0, 1.0, 0.6, 0.7, 1.0, 1.0
],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
index_data=[2, 2, 2, 1, 0, 0, 1, 1, 0, 0, 1, 1],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=2,
is_categorical=True)),
dict(
testcase_name='_string_feature_with_weights',
x_data=[
b'hello', b'world', b'goodbye', b'aaaaa', b'aaaaa', b'goodbye'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
weight_data=[1.0, .5, 1.0, .26, .25, 1.5],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
index_data=[1, 3, 0, 2, 2, 0],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=3,
is_categorical=True)),
dict(
testcase_name='_int64_feature_with_weights',
x_data=[2, 1, 3, 4, 4, 3],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
weight_data=[1.0, .5, 1.0, .26, .25, 1.5],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
index_data=[1, 3, 0, 2, 2, 0],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=3,
is_categorical=True)),
dict(
testcase_name='_whitespace_newlines_and_empty_strings_text',
x_data=[
b'hello', b'world', b'hello', b'hello', b'goodbye', b'world',
b'aaaaa', b' ', b'', b'\n', b'hi \n ho \n', '\r'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
# The empty string and strings containing newlines map to default
# value because the vocab cannot contain them.
index_data=[0, 1, 0, 0, 2, 1, 3, 4, -1, -1, -1, -1],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=4, is_categorical=True),
vocab_filename='my_vocab',
expected_vocab_file_contents={
'my_vocab': [b'hello', b'world', b'goodbye', b'aaaaa', b' ']
},
required_format='text'),
dict(
testcase_name='_whitespace_newlines_and_empty_strings_tfrecord',
x_data=[
b'hello', b'world', b'hello', b'hello', b'goodbye', b'world',
b'aaaaa', b' ', b'', b'\n', b'hi \n ho \n', b'\r'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
index_data=[0, 0, 0, 1, 1, 8, 3, 2, 4, 5, 6, 7],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=8, is_categorical=True),
vocab_filename='my_vocab',
expected_vocab_file_contents={
'my_vocab': [
b'hello', b'world', b'hi \n ho \n', b'goodbye', b'aaaaa',
b' ', b'\r', b'\n', b''
]
},
required_format='tfrecord_gzip'),
dict(
testcase_name='_whitespace_newlines_empty_oov_buckets_text',
x_data=[
b'hello', b'world', b'hello', b'hello', b'goodbye', b'world',
b'aaaaa', b' ', b'', b'\n', b'hi \n ho \n', '\r'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
index_data=[0, 1, 0, 0, 2, 1, 3, 4, 5, 5, 5, 5],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=0, max=5, is_categorical=True),
num_oov_buckets=1,
vocab_filename='my_vocab',
expected_vocab_file_contents={
'my_vocab': [b'hello', b'world', b'goodbye', b'aaaaa', b' ']
},
required_format='text'),
dict(
testcase_name='_whitespace_newlines_empty_oov_buckets_tfrecord',
x_data=[
b'hello', b'world', b'hello', b'hello', b'goodbye', b'world',
b'aaaaa', b' ', b'', b'\n', b'hi \n ho \n', '\r'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
index_data=[0, 0, 1, 0, 1, 8, 3, 2, 4, 5, 6, 7],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=0, max=9, is_categorical=True),
num_oov_buckets=1,
vocab_filename='my_vocab',
expected_vocab_file_contents={
'my_vocab': [
b'hello', b'world', b'hi \n ho \n', b'goodbye', b'aaaaa',
b' ', b'\r', b'\n', b''
]
},
required_format='tfrecord_gzip'),
dict(
testcase_name='_positive_and_negative_integers',
x_data=[13, 14, 13, 13, 12, 14, 11, 10, 10, -10, -10, -20],
x_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[0, 1, 0, 0, 4, 1, 5, 2, 2, 3, 3, 6],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=6, is_categorical=True),
vocab_filename='my_vocab',
expected_vocab_file_contents={
'my_vocab': [b'13', b'14', b'10', b'-10', b'12', b'11', b'-20']
}),
dict(
testcase_name='_rank_2',
x_data=[[[b'some', b'say'], [b'the', b'world']],
[[b'will', b'end'], [b'in', b'fire']],
[[b'some', b'say'], [b'in', b'ice']]],
x_feature_spec=tf.io.FixedLenFeature([2, 2], tf.string),
index_data=[[[0, 1], [5, 3]], [[4, 8], [2, 7]], [[0, 1], [2, 6]]],
index_feature_spec=tf.io.FixedLenFeature([2, 2], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=8,
is_categorical=True)),
dict(
testcase_name='_top_k',
x_data=[[b'hello', b'hello', b'world'],
[b'hello', b'goodbye', b'world'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1], [0, -99, 1], [0, -99, -99]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=1, is_categorical=True),
default_value=-99,
top_k=2),
dict(
testcase_name='_top_k_specified_as_str',
x_data=[[b'hello', b'hello', b'world'],
[b'hello', b'goodbye', b'world'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1], [0, -9, 1], [0, -9, -9]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(min=-9, max=1, is_categorical=True),
default_value=-9,
top_k='2'),
dict(
testcase_name='_frequency_threshold',
x_data=[[b'hello', b'hello', b'world'],
[b'hello', b'goodbye', b'world'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1], [0, 2, 1], [0, 2, -99]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=2, is_categorical=True),
default_value=-99,
frequency_threshold=2),
dict(
testcase_name='_frequency_threshold_specified_with_str',
x_data=[[b'hello', b'hello', b'world'],
[b'hello', b'goodbye', b'world'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1], [0, 2, 1], [0, 2, -9]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(min=-9, max=2, is_categorical=True),
default_value=-9,
frequency_threshold='2'),
dict(
testcase_name='_empty_vocabulary_from_high_frequency_threshold',
x_data=[[b'hello', b'hello', b'world'],
[b'hello', b'goodbye', b'world'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[-99, -99, -99], [-99, -99, -99], [-99, -99, -99]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=0, is_categorical=True),
default_value=-99,
frequency_threshold=77),
dict(
testcase_name='_top_k_and_oov',
x_data=[[b'hello', b'hello', b'world', b'world'],
[b'hello', b'tarkus', b'toccata'],
[b'hello', b'goodbye', b'foo']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
# Generated vocab (ordered by frequency, then value) should be:
# ["hello", "world", "goodbye", "foo", "tarkus", "toccata"]. After
# applying top_k =1 this becomes ["hello"] plus three OOV buckets.
# The specific output values here depend on the hash of the words,
# and the test will break if the hash changes.
index_data=[[0, 0, 2, 2], [0, 3, 1], [0, 2, 1]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(min=0, max=3, is_categorical=True),
default_value=-99,
top_k=1,
num_oov_buckets=3),
dict(
testcase_name='_key_fn',
x_data=[['a_X_1', 'a_X_1', 'a_X_2', 'b_X_1', 'b_X_2'],
['a_X_1', 'a_X_1', 'a_X_2', 'a_X_2'], ['b_X_2']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1, -99, 2], [0, 0, 1, 1], [2]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=2, is_categorical=True),
coverage_top_k=1,
default_value=-99,
key_fn=lambda s: s.split(b'_X_')[0],
frequency_threshold=3),
dict(
testcase_name='_key_fn_and_multi_coverage_top_k',
x_data=[['a_X_1', 'a_X_1', 'a_X_2', 'b_X_1', 'b_X_2'],
['a_X_1', 'a_X_1', 'a_X_2', 'a_X_2', 'a_X_3'], ['b_X_2']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, 1, 3, 2], [0, 0, 1, 1, -99], [2]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=3, is_categorical=True),
coverage_top_k=2,
default_value=-99,
key_fn=lambda s: s.split(b'_X_')[0],
frequency_threshold=300),
dict(
testcase_name='_key_fn_and_top_k',
x_data=[['a_X_1', 'a_X_1', 'a_X_2', 'b_X_1', 'b_X_2'],
['a_X_1', 'a_X_1', 'a_X_2', 'a_X_2'],
['b_X_2', 'b_X_2', 'b_X_2', 'b_X_2', 'c_X_1']],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[1, 1, -99, -99, 0], [1, 1, -99, -99], [0, 0, 0, 0, 2]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=2, is_categorical=True),
coverage_top_k=1,
default_value=-99,
key_fn=lambda s: s.split(b'_X_')[0],
top_k=2),
dict(
testcase_name='_key_fn_multi_coverage_top_k',
x_data=[
['0_X_a', '0_X_a', '5_X_a', '6_X_a', '6_X_a', '0_X_a'],
['0_X_a', '2_X_a', '2_X_a', '2_X_a', '0_X_a', '5_X_a'],
['1_X_b', '1_X_b', '3_X_b', '3_X_b', '0_X_b', '1_X_b', '1_X_b']
],
x_feature_spec=tf.io.VarLenFeature(tf.string),
index_data=[[0, 0, -99, -99, -99, 0], [0, 2, 2, 2, 0, -99],
[1, 1, 3, 3, -99, 1, 1]],
index_feature_spec=tf.io.VarLenFeature(tf.int64),
index_domain=schema_pb2.IntDomain(
min=-99, max=3, is_categorical=True),
coverage_top_k=2,
default_value=-99,
key_fn=lambda s: s.split(b'_X_')[1],
frequency_threshold=4),
dict(
testcase_name='_key_fn_and_labels',
x_data=[
'aaa', 'aaa', 'aaa', 'aab', 'aba', 'aba', 'aab', 'aab', 'aba',
'abc', 'abc', 'aab'
],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
label_data=[1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0],
label_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_data=[0, 0, 0, -1, -1, -1, -1, -1, -1, 1, 1, -1],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=1, is_categorical=True),
coverage_top_k=1,
key_fn=lambda s: s[:2],
frequency_threshold=3),
dict(
testcase_name='_key_fn_and_weights',
x_data=['xa', 'xa', 'xb', 'ya', 'yb', 'yc'],
x_feature_spec=tf.io.FixedLenFeature([], tf.string),
weight_data=[1.0, 0.5, 3.0, 0.6, 0.25, 0.5],
weight_feature_spec=tf.io.FixedLenFeature([], tf.float32),
index_data=[1, 1, 0, -1, -1, -1],
index_feature_spec=tf.io.FixedLenFeature([], tf.int64),
index_domain=schema_pb2.IntDomain(min=-1, max=1, is_categorical=True),
coverage_top_k=1,
key_fn=lambda s: s[0],
frequency_threshold=1.5,
coverage_frequency_threshold=1),
] + _EMPTY_VOCABULARY_PARAMS))
  def testComputeAndApplyVocabulary(self,
                                    x_data,
                                    x_feature_spec,
                                    index_data,
                                    index_feature_spec,
                                    index_domain,
                                    label_data=None,
                                    label_feature_spec=None,
                                    weight_data=None,
                                    weight_feature_spec=None,
                                    expected_vocab_file_contents=None,
                                    required_format=None,
                                    **kwargs):
    """Test tft.compute_and_apply_vocabulary with various inputs."""
    # Some parameterizations only make sense for one vocabulary file format
    # (newline/empty-string handling differs between 'text' and
    # 'tfrecord_gzip'); skip them when run under any other format.
    if required_format is not None and required_format != self._VocabFormat():
      raise tft_unit.SkipTest('Test only applicable to format: {}.'.format(
          self._VocabFormat()))
    input_data = [{'x': x} for x in x_data]
    input_feature_spec = {'x': x_feature_spec}
    expected_data = [{'index': index} for index in index_data]
    expected_feature_spec = {'index': index_feature_spec}
    expected_domains = {'index': index_domain}
    # Optional per-instance labels and weights are attached positionally.
    if label_data is not None:
      for idx, label in enumerate(label_data):
        input_data[idx]['label'] = label
      input_feature_spec['label'] = label_feature_spec
    if weight_data is not None:
      for idx, weight in enumerate(weight_data):
        input_data[idx]['weights'] = weight
      input_feature_spec['weights'] = weight_feature_spec
    input_metadata = tft_unit.metadata_from_feature_spec(input_feature_spec)
    expected_metadata = tft_unit.metadata_from_feature_spec(
        expected_feature_spec, expected_domains)

    def preprocessing_fn(inputs):
      x = inputs['x']
      labels = inputs.get('label')
      weights = inputs.get('weights')
      # Remaining keyword args (top_k, frequency_threshold, key_fn, ...) are
      # forwarded straight through to the analyzer.
      index = tft.compute_and_apply_vocabulary(
          x,
          labels=labels,
          weights=weights,
          file_format=self._VocabFormat(),
          **kwargs)
      return {'index': index}

    self.assertAnalyzeAndTransformResults(
        input_data,
        input_metadata,
        preprocessing_fn,
        expected_data,
        expected_metadata,
        expected_vocab_file_contents=expected_vocab_file_contents)
def testSparseComputeAndApplyVocabulary(self):
feature_spec = {
'x': tf.io.SparseFeature(['idx0', 'idx1'], 'val', tf.string, [2, 3])
}
input_metadata = tft_unit.metadata_from_feature_spec(feature_spec)
input_data = [
{
'val': ['hello'],
'idx0': [0],
'idx1': [0]
},
{
'val': ['world'],
'idx0': [1],
'idx1': [1]
},
{
'val': ['hello', 'goodbye'],
'idx0': [0, 1],
'idx1': [1, 2]
},
{
'val': ['hello', 'goodbye', ' '],
'idx0': [0, 1, 1],
'idx1': [0, 1, 2]
},
]
expected_data = [{
'index$sparse_indices_0': [0],
'index$sparse_indices_1': [0],
'index$sparse_values': [0],
}, {
'index$sparse_indices_0': [1],
'index$sparse_indices_1': [1],
'index$sparse_values': [2],
}, {
'index$sparse_indices_0': [0, 1],
'index$sparse_indices_1': [1, 2],
'index$sparse_values': [0, 1],
}, {
'index$sparse_indices_0': [0, 1, 1],
'index$sparse_indices_1': [0, 1, 2],
'index$sparse_values': [0, 1, 3],
}]
expected_vocab_file_contents = {
'my_vocab': [b'hello', b'goodbye', b'world', b' ']
}
def preprocessing_fn(inputs):
index = tft.compute_and_apply_vocabulary(
inputs['x'],
file_format=self._VocabFormat(),
vocab_filename='my_vocab')
return {'index': index}
self.assertAnalyzeAndTransformResults(
input_data,
input_metadata,
preprocessing_fn,
expected_data,
expected_vocab_file_contents=expected_vocab_file_contents)
  # Example on how to use the vocab frequency as part of the transform
  # function.
  def testCreateVocabWithFrequency(self):
    """Looks up both index and frequency from a store_frequency vocabulary."""
    input_data = [
        {'a': 'hello', 'b': 'world', 'c': 'aaaaa'},
        {'a': 'good', 'b': '', 'c': 'hello'},
        {'a': 'goodbye', 'b': 'hello', 'c': '\n'},
        {'a': '_', 'b': 'aaaaa', 'c': 'bbbbb'}
    ]
    input_metadata = tft_unit.metadata_from_feature_spec({
        'a': tf.io.FixedLenFeature([], tf.string),
        'b': tf.io.FixedLenFeature([], tf.string),
        'c': tf.io.FixedLenFeature([], tf.string)
    })
    vocab_filename = 'test_vocab_with_frequency'

    def preprocessing_fn(inputs):
      deferred_vocab_and_filename = tft.vocabulary(
          tf.concat([inputs['a'], inputs['b'], inputs['c']], 0),
          vocab_filename=vocab_filename,
          store_frequency=True,
          file_format=self._VocabFormat())

      def _make_table_initializer(filename_tensor, is_frequency_value):
        # Builds an initializer mapping a token either to its line number
        # (i.e. its index) or to its stored frequency, depending on
        # `is_frequency_value`.
        if self._VocabFormat() == 'text':
          # With store_frequency=True each text line is '<frequency> <token>',
          # hence key_index=1 and delimiter=' '.
          return tf.lookup.TextFileInitializer(
              filename=filename_tensor,
              key_dtype=tf.string,
              key_index=1,
              value_dtype=tf.int64,
              value_index=(0 if is_frequency_value else
                           tf.lookup.TextFileIndex.LINE_NUMBER),
              delimiter=' ')
        elif self._VocabFormat() == 'tfrecord_gzip':
          return tft.tf_utils.make_tfrecord_vocabulary_lookup_initializer(
              filename_tensor,
              return_indicator_as_value=is_frequency_value,
              has_indicator=True)

      def _apply_vocab(y, deferred_vocab_filename_tensor):
        # Token -> index lookup; unknown tokens map to -1.
        initializer = _make_table_initializer(deferred_vocab_filename_tensor,
                                              False)
        table = tf.lookup.StaticHashTable(initializer, default_value=-1)
        table_size = table.size()
        return table.lookup(y), table_size

      def _apply_frequency(y, deferred_vocab_filename_tensor):
        # Token -> frequency lookup; unknown tokens map to -1.
        initializer = _make_table_initializer(deferred_vocab_filename_tensor,
                                              True)
        table = tf.lookup.StaticHashTable(initializer, default_value=-1)
        return table.lookup(y), table.size()

      return {
          'index_a':
              tft.apply_vocabulary(
                  inputs['a'],
                  deferred_vocab_and_filename,
                  lookup_fn=_apply_vocab,
                  file_format=self._VocabFormat()),
          'frequency_a':
              tft.apply_vocabulary(
                  inputs['a'],
                  deferred_vocab_and_filename,
                  lookup_fn=_apply_frequency,
                  file_format=self._VocabFormat()),
          'index_b':
              tft.apply_vocabulary(
                  inputs['b'],
                  deferred_vocab_and_filename,
                  lookup_fn=_apply_vocab,
                  file_format=self._VocabFormat()),
          'frequency_b':
              tft.apply_vocabulary(
                  inputs['b'],
                  deferred_vocab_and_filename,
                  lookup_fn=_apply_frequency,
                  file_format=self._VocabFormat()),
      }

    expected_vocab = [(b'hello', 3), (b'aaaaa', 2), (b'world', 1),
                      (b'goodbye', 1), (b'good', 1), (b'bbbbb', 1), (b'_', 1),
                      (b'\n', 1), (b'', 1)]
    if self._VocabFormat() == 'text':
      # The text format cannot hold '\n' or the empty string; the last two
      # entries are dropped and '' maps to the default -1 for both lookups.
      expected_vocab = expected_vocab[:-2]
      empty_index = -1
      empty_frequency = -1
    else:
      empty_index = 8
      empty_frequency = 1
    expected_data = [
        # For tied frequencies, larger (lexicographic) items come first.
        {
            'index_a': 0,
            'frequency_a': 3,
            'index_b': 2,
            'frequency_b': 1
        },
        {
            'index_a': 4,
            'frequency_a': 1,
            'index_b': empty_index,
            'frequency_b': empty_frequency
        },
        {
            'index_a': 3,
            'frequency_a': 1,
            'index_b': 0,
            'frequency_b': 3
        },
        {
            'index_a': 6,
            'frequency_a': 1,
            'index_b': 1,
            'frequency_b': 2
        }
    ]
    size = len(expected_vocab) - 1
    expected_metadata = tft_unit.metadata_from_feature_spec(
        {
            'index_a': tf.io.FixedLenFeature([], tf.int64),
            'index_b': tf.io.FixedLenFeature([], tf.int64),
            'frequency_a': tf.io.FixedLenFeature([], tf.int64),
            'frequency_b': tf.io.FixedLenFeature([], tf.int64),
        }, {
            'index_a':
                schema_pb2.IntDomain(min=-1, max=size, is_categorical=True),
            'index_b':
                schema_pb2.IntDomain(min=-1, max=size, is_categorical=True),
            'frequency_a':
                schema_pb2.IntDomain(min=-1, max=size, is_categorical=True),
            'frequency_b':
                schema_pb2.IntDomain(min=-1, max=size, is_categorical=True),
        })
    self.assertAnalyzeAndTransformResults(
        input_data,
        input_metadata,
        preprocessing_fn,
        expected_data,
        expected_metadata,
        expected_vocab_file_contents={vocab_filename: expected_vocab})
def testVocabularyAnalyzerWithTokenization(self):
def preprocessing_fn(inputs):
return {
'index':
tft.compute_and_apply_vocabulary(
tf.compat.v1.strings.split(inputs['a']),
file_format=self._VocabFormat(),
vocab_filename='my_vocab')
}
input_data = [{'a': 'hello hello world'}, {'a': 'hello goodbye world'}]
input_metadata = tft_unit.metadata_from_feature_spec(
{'a': tf.io.FixedLenFeature([], tf.string)})
expected_data = [{'index': [0, 0, 1]}, {'index': [0, 2, 1]}]
expected_metadata = tft_unit.metadata_from_feature_spec({
'index': tf.io.VarLenFeature(tf.int64),
}, {
'index': schema_pb2.IntDomain(min=-1, max=2, is_categorical=True),
})
expected_vocabulary = {'my_vocab': [b'hello', b'world', b'goodbye']}
self.assertAnalyzeAndTransformResults(
input_data,
input_metadata,
preprocessing_fn,
expected_data,
expected_metadata=expected_metadata,
expected_vocab_file_contents=expected_vocabulary)
  def testVocabularyWithFrequency(self):
    """Checks frequency output and auto-generated vocabulary filenames."""
    outfile = 'vocabulary_with_frequency'

    def preprocessing_fn(inputs):
      # Force the analyzer to be executed, and store the frequency file as a
      # side-effect.
      _ = tft.vocabulary(
          inputs['a'],
          vocab_filename=outfile,
          store_frequency=True,
          file_format=self._VocabFormat())
      _ = tft.vocabulary(
          inputs['a'], store_frequency=True, file_format=self._VocabFormat())
      _ = tft.vocabulary(
          inputs['b'], store_frequency=True, file_format=self._VocabFormat())
      # The following must not produce frequency output, just the vocab words.
      _ = tft.vocabulary(inputs['b'], file_format=self._VocabFormat())
      a_int = tft.compute_and_apply_vocabulary(
          inputs['a'], file_format=self._VocabFormat())
      # Return input unchanged, this preprocessing_fn is a no-op except for
      # computing uniques.
      return {'a_int': a_int}

    input_metadata = tft_unit.metadata_from_feature_spec({
        'a': tf.io.FixedLenFeature([], tf.string),
        'b': tf.io.FixedLenFeature([], tf.string)
    })

    tft_tmp_dir = os.path.join(self.get_temp_dir(), 'temp_dir')
    transform_fn_dir = os.path.join(self.get_temp_dir(), 'export_transform_fn')

    with beam_impl.Context(temp_dir=tft_tmp_dir):
      with self._makeTestPipeline() as pipeline:
        input_data = pipeline | beam.Create([
            {'a': 'hello', 'b': 'hi'},
            {'a': 'world', 'b': 'ho ho'},
            {'a': 'hello', 'b': 'ho ho'},
        ])
        transform_fn = (
            (input_data, input_metadata)
            | beam_impl.AnalyzeDataset(preprocessing_fn))
        _ = transform_fn | transform_fn_io.WriteTransformFn(transform_fn_dir)

    self.assertTrue(os.path.isdir(tft_tmp_dir))

    tft_output = tft.TFTransformOutput(transform_fn_dir)
    assets_path = os.path.join(tft_output.transform_savedmodel_dir,
                               tf.saved_model.ASSETS_DIRECTORY)
    self.assertTrue(os.path.isdir(assets_path))

    # With store_frequency=True entries read '<frequency> <token>', most
    # frequent first.
    self.assertEqual([b'2 hello', b'1 world'],
                     tft_output.vocabulary_by_name(outfile))
    # Unnamed vocabularies are assigned sequentially numbered filenames.
    self.assertEqual(
        [b'2 hello', b'1 world'],
        tft_output.vocabulary_by_name('vocab_frequency_vocabulary_1'))
    self.assertEqual(
        [b'2 ho ho', b'1 hi'],
        tft_output.vocabulary_by_name('vocab_frequency_vocabulary_2'))
    self.assertEqual([b'ho ho', b'hi'],
                     tft_output.vocabulary_by_name('vocab_vocabulary_3'))
    self.assertEqual([b'hello', b'world'],
                     tft_output.vocabulary_by_name(
                         'vocab_compute_and_apply_vocabulary_vocabulary'))
  def testVocabularyWithKeyFnAndFrequency(self):
    """Frequency output for a key_fn / coverage-constrained vocabulary."""

    def key_fn(string):
      # Tokens look like '<value>_X_<key>'; group them by the key suffix.
      return string.split(b'_X_')[1]

    outfile = 'vocabulary_with_frequency'

    def preprocessing_fn(inputs):
      # Force the analyzer to be executed, and store the frequency file as a
      # side-effect.
      _ = tft.vocabulary(
          tf.compat.v1.strings.split(inputs['a']),
          coverage_top_k=1,
          key_fn=key_fn,
          frequency_threshold=4,
          vocab_filename=outfile,
          store_frequency=True,
          file_format=self._VocabFormat())

      _ = tft.vocabulary(
          tf.compat.v1.strings.split(inputs['a']),
          coverage_top_k=1,
          key_fn=key_fn,
          frequency_threshold=4,
          store_frequency=True,
          file_format=self._VocabFormat())

      a_int = tft.compute_and_apply_vocabulary(
          tf.compat.v1.strings.split(inputs['a']),
          coverage_top_k=1,
          key_fn=key_fn,
          frequency_threshold=4,
          file_format=self._VocabFormat())

      # Return input unchanged, this preprocessing_fn is a no-op except for
      # computing uniques.
      return {'a_int': a_int}

    input_metadata = tft_unit.metadata_from_feature_spec(
        {'a': tf.io.FixedLenFeature([], tf.string)})

    tft_tmp_dir = os.path.join(self.get_temp_dir(), 'temp_dir')
    transform_fn_dir = os.path.join(self.get_temp_dir(), 'export_transform_fn')

    with beam_impl.Context(temp_dir=tft_tmp_dir):
      with self._makeTestPipeline() as pipeline:
        input_data = pipeline | beam.Create([
            {'a': '1_X_a 1_X_a 2_X_a 1_X_b 2_X_b'},
            {'a': '1_X_a 1_X_a 2_X_a 2_X_a'},
            {'a': '2_X_b 3_X_c 4_X_c'}
        ])
        transform_fn = (
            (input_data, input_metadata)
            | beam_impl.AnalyzeDataset(preprocessing_fn))
        _ = transform_fn | transform_fn_io.WriteTransformFn(transform_fn_dir)

    self.assertTrue(os.path.isdir(tft_tmp_dir))

    tft_output = tft.TFTransformOutput(transform_fn_dir)
    assets_path = os.path.join(tft_output.transform_savedmodel_dir,
                               tf.saved_model.ASSETS_DIRECTORY)
    self.assertTrue(os.path.isdir(assets_path))

    # Only '1_X_a' (count 4) passes frequency_threshold=4; '2_X_b' and
    # '4_X_c' are retained to satisfy coverage_top_k=1 for keys 'b' and 'c'.
    self.assertEqual([b'4 1_X_a', b'2 2_X_b', b'1 4_X_c'],
                     tft_output.vocabulary_by_name(outfile))
  def testVocabularyAnnotations(self):
    """Checks that annotate_asset entries appear in the output asset map."""
    outfile = 'vocab.file'
    # Sanitization of vocabulary file names replaces '.' with '_'.
    annotation_file = 'vocab_file'
    if self._VocabFormat() == 'tfrecord_gzip':
      annotation_file = '{}.tfrecord.gz'.format(annotation_file)

    def preprocessing_fn(inputs):
      _ = tft.vocabulary(
          inputs['a'], vocab_filename=outfile, file_format=self._VocabFormat())
      # Register the same asset file under an additional explicit key.
      tft.annotate_asset('key_1', annotation_file)
      return inputs

    input_metadata = tft_unit.metadata_from_feature_spec(
        {'a': tf.io.FixedLenFeature([], tf.string)})

    tft_tmp_dir = os.path.join(self.get_temp_dir(), 'temp_dir')
    transform_fn_dir = os.path.join(self.get_temp_dir(), 'export_transform_fn')

    with beam_impl.Context(temp_dir=tft_tmp_dir):
      with self._makeTestPipeline() as pipeline:
        input_data = pipeline | beam.Create([
            {
                'a': 'hello',
            },
            {
                'a': 'world',
            },
            {
                'a': 'hello',
            },
        ])
        transform_fn = ((input_data, input_metadata)
                        | beam_impl.AnalyzeDataset(preprocessing_fn))
        _, metadata = transform_fn
        # Both the annotation key and the vocabulary name should point at the
        # same sanitized asset file.
        self.assertDictEqual(metadata.asset_map, {
            'key_1': annotation_file,
            outfile: annotation_file
        })
        _ = transform_fn | transform_fn_io.WriteTransformFn(transform_fn_dir)

    self.assertTrue(os.path.isdir(tft_tmp_dir))

    tft_output = tft.TFTransformOutput(transform_fn_dir)
    assets_path = os.path.join(tft_output.transform_savedmodel_dir,
                               tf.saved_model.ASSETS_DIRECTORY)
    self.assertTrue(os.path.isdir(assets_path))
    # The annotated key can be used to look up the vocabulary contents.
    self.assertEqual([b'hello', b'world'],
                     tft_output.vocabulary_by_name('key_1'))
@mock.patch.object(analyzer_impls, '_PRESORT_BATCH_SIZE', 2)
def testVocabularyPreSort(self):
input_data = [
dict(x=b'foo'),
dict(x=b'hello'),
dict(x=b'aaaaa'),
dict(x=b'goodbye'),
dict(x=b'bar'),
dict(x=b'hello'),
dict(x=b'goodbye'),
dict(x=b'hello'),
dict(x=b'hello'),
dict(x=b'goodbye'),
dict(x=b'aaaaa'),
]
input_metadata = tft_unit.metadata_from_feature_spec(
{'x': tf.io.FixedLenFeature([], tf.string)})
expected_vocab_file_contents = [(b'hello', 4), (b'goodbye', 3),
(b'aaaaa', 2), (b'foo', 1), (b'bar', 1)]
def preprocessing_fn(inputs):
tft.vocabulary(
inputs['x'],
vocab_filename='my_vocab',
file_format=self._VocabFormat(),
store_frequency=True)
return inputs
self.assertAnalyzeAndTransformResults(
input_data,
input_metadata,
preprocessing_fn,
input_data,
input_metadata,
expected_vocab_file_contents={'my_vocab': expected_vocab_file_contents})
  def testVocabularyCustomLookup(self):
    """Uses the returned vocab path directly with a hand-built lookup table."""
    if self._VocabFormat() != 'text':
      raise tft_unit.SkipTest('Test only applicable to text format.')
    input_data = [
        dict(x=b'bar'),
        dict(x=b'foo'),
        dict(x=b'bar'),
        dict(x=b'bar'),
        dict(x=b'foo'),
    ]
    input_metadata = tft_unit.metadata_from_feature_spec(
        {'x': tf.io.FixedLenFeature([], tf.string)})
    # 'bar' (3 occurrences) maps to index 0, 'foo' (2) to index 1; the mean
    # of the indices [0, 0, 1, 1, 0] is 0.4.
    expected_data = [
        dict(x=b'bar', x_int=0, x_int_mean=0.4),
        dict(x=b'bar', x_int=0, x_int_mean=0.4),
        dict(x=b'bar', x_int=0, x_int_mean=0.4),
        dict(x=b'foo', x_int=1, x_int_mean=0.4),
        dict(x=b'foo', x_int=1, x_int_mean=0.4),
    ]
    expected_metadata = tft_unit.metadata_from_feature_spec({
        'x': tf.io.FixedLenFeature([], tf.string),
        'x_int': tf.io.FixedLenFeature([], tf.int64),
        'x_int_mean': tf.io.FixedLenFeature([], tf.float32)
    })
    expected_vocab_file_contents = [(b'bar'), (b'foo')]

    def preprocessing_fn(inputs):
      vocab_path = tft.vocabulary(
          inputs['x'],
          vocab_filename='my_vocab',
          file_format=self._VocabFormat())
      # Build a lookup table straight from the deferred vocabulary filename
      # instead of going through tft.apply_vocabulary: one whole line per
      # token, line number as the index.
      initializer = tf.lookup.TextFileInitializer(
          vocab_path,
          key_dtype=tf.string,
          key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
          value_dtype=tf.int64,
          value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
      table = tf.lookup.StaticHashTable(initializer, default_value=-1)
      x_int = table.lookup(inputs['x'])

      # Verify the looked-up values can feed a second (mean) analyzer.
      x_int_mean = tf.zeros_like(x_int, dtype=tf.float32) + tft.mean(x_int)
      return {'x': inputs['x'], 'x_int': x_int, 'x_int_mean': x_int_mean}

    self.assertAnalyzeAndTransformResults(
        input_data,
        input_metadata,
        preprocessing_fn,
        expected_data,
        expected_metadata,
        expected_vocab_file_contents={'my_vocab': expected_vocab_file_contents})
if __name__ == '__main__':
tft_unit.main()
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\nimport os\nimport sys\n\n\ndef is_active():\n return True\n\n\ndef get_name():\n return \"iOS\"\n\n\ndef can_build():\n\n import sys\n import os\n if sys.platform == 'darwin' or os.environ.has_key(... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\nimport os\nimport sys\n\n\ndef is_active():\n return True\n\n\ndef get_name():\n return \"iOS\"\n\n\ndef can_build():\n\n import sys\n import os\n if sys.platform == 'darwin' or os.... | ```python
import os
import sys
def is_active():
return True
def get_name():
return "iOS"
def can_build():
import sys
import os
if sys.platform == 'darwin' or os.environ.has_key("OSXCROSS_IOS"):
return True
return False
def get_opts():
return [
('IPHONEPLATFORM', 'name of the iphone platform', 'iPhoneOS'),
('IPHONEPATH', 'the path to iphone toolchain', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain'),
('IPHONESDK', 'path to the iphone SDK', '/Applications/Xcode.app/Contents/Developer/Platforms/${IPHONEPLATFORM}.platform/Developer/SDKs/${IPHONEPLATFORM}.sdk/'),
('game_center', 'Support for game center', 'yes'),
('store_kit', 'Support for in-app store', 'yes'),
('icloud', 'Support for iCloud', 'yes'),
('ios_gles22_override', 'Force GLES2.0 on iOS', 'yes'),
('ios_exceptions', 'Enable exceptions', 'no'),
('ios_triple', 'Triple for ios toolchain', ''),
('ios_sim', 'Build simulator binary', 'no'),
]
def get_flags():
return [
('tools', 'no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/iphone'])
env['ENV']['PATH'] = env['IPHONEPATH'] + "/Developer/usr/bin/:" + env['ENV']['PATH']
env['CC'] = '$IPHONEPATH/usr/bin/${ios_triple}clang'
env['CXX'] = '$IPHONEPATH/usr/bin/${ios_triple}clang++'
env['AR'] = '$IPHONEPATH/usr/bin/${ios_triple}ar'
env['RANLIB'] = '$IPHONEPATH/usr/bin/${ios_triple}ranlib'
import string
if (env["ios_sim"] == "yes" or env["arch"] == "x86"): # i386, simulator
env["arch"] = "x86"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-arch i386 -fobjc-abi-version=2 -fobjc-legacy-dispatch -fmessage-length=0 -fpascal-strings -fasm-blocks -D__IPHONE_OS_VERSION_MIN_REQUIRED=40100 -isysroot $IPHONESDK -mios-simulator-version-min=4.3 -DCUSTOM_MATRIX_TRANSFORM_H=\\\"build/iphone/matrix4_iphone.h\\\" -DCUSTOM_VECTOR3_TRANSFORM_H=\\\"build/iphone/vector3_iphone.h\\\"'))
elif (env["arch"] == "arm" or env["arch"] == "arm32" or env["arch"] == "armv7" or env["bits"] == "32"): # arm
env["arch"] = "arm"
env["bits"] = "32"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch armv7 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -isysroot $IPHONESDK -fvisibility=hidden -mthumb "-DIBOutlet=__attribute__((iboutlet))" "-DIBOutletCollection(ClassName)=__attribute__((iboutletcollection(ClassName)))" "-DIBAction=void)__attribute__((ibaction)" -miphoneos-version-min=9.0 -MMD -MT dependencies -isysroot $IPHONESDK'))
else: # armv64
env["arch"] = "arm64"
env["bits"] = "64"
env.Append(CCFLAGS=string.split('-fno-objc-arc -arch arm64 -fmessage-length=0 -fno-strict-aliasing -fdiagnostics-print-source-range-info -fdiagnostics-show-category=id -fdiagnostics-parseable-fixits -fpascal-strings -fvisibility=hidden -MMD -MT dependencies -miphoneos-version-min=9.0 -isysroot $IPHONESDK'))
env.Append(CPPFLAGS=['-DNEED_LONG_INT'])
env.Append(CPPFLAGS=['-DLIBYUV_DISABLE_NEON'])
if (env["arch"] == "x86"):
env['IPHONEPLATFORM'] = 'iPhoneSimulator'
env.Append(LINKFLAGS=['-arch', 'i386', '-mios-simulator-version-min=4.3',
'-isysroot', '$IPHONESDK',
#'-mmacosx-version-min=10.6',
'-Xlinker',
'-objc_abi_version',
'-Xlinker', '2',
'-framework', 'AudioToolbox',
'-framework', 'AVFoundation',
'-framework', 'CoreAudio',
'-framework', 'CoreGraphics',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
'-framework', 'Foundation',
'-framework', 'Security',
'-framework', 'UIKit',
'-framework', 'MediaPlayer',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'SystemConfiguration',
'-F$IPHONESDK',
])
elif (env["arch"] == "arm64"):
env.Append(LINKFLAGS=['-arch', 'arm64', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
#'-stdlib=libc++',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
])
else:
env.Append(LINKFLAGS=['-arch', 'armv7', '-Wl,-dead_strip', '-miphoneos-version-min=9.0',
'-isysroot', '$IPHONESDK',
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'CoreGraphics',
'-framework', 'OpenGLES',
'-framework', 'QuartzCore',
'-framework', 'CoreAudio',
'-framework', 'AudioToolbox',
'-framework', 'SystemConfiguration',
'-framework', 'Security',
#'-framework', 'AdSupport',
'-framework', 'MediaPlayer',
'-framework', 'AVFoundation',
'-framework', 'CoreMedia',
'-framework', 'CoreMotion',
])
if env['game_center'] == 'yes':
env.Append(CPPFLAGS=['-fblocks', '-DGAME_CENTER_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'GameKit'])
if env['store_kit'] == 'yes':
env.Append(CPPFLAGS=['-DSTOREKIT_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'StoreKit'])
if env['icloud'] == 'yes':
env.Append(CPPFLAGS=['-DICLOUD_ENABLED'])
env.Append(CPPPATH=['$IPHONESDK/usr/include', '$IPHONESDK/System/Library/Frameworks/OpenGLES.framework/Headers', '$IPHONESDK/System/Library/Frameworks/AudioUnit.framework/Headers'])
if (env["target"] == "release"):
env.Append(CCFLAGS=['-O3', '-DNS_BLOCK_ASSERTIONS=1', '-gdwarf-2']) # removed -ffast-math
env.Append(LINKFLAGS=['-O3'])
elif env["target"] == "release_debug":
env.Append(CCFLAGS=['-Os', '-DNS_BLOCK_ASSERTIONS=1', '-DDEBUG_ENABLED'])
env.Append(LINKFLAGS=['-Os'])
env.Append(CPPFLAGS=['-DDEBUG_MEMORY_ENABLED'])
elif (env["target"] == "debug"):
env.Append(CCFLAGS=['-D_DEBUG', '-DDEBUG=1', '-gdwarf-2', '-O0', '-DDEBUG_ENABLED'])
env.Append(CPPFLAGS=['-DDEBUG_MEMORY_ENABLED'])
elif (env["target"] == "profile"):
env.Append(CCFLAGS=['-g', '-pg', '-Os'])
env.Append(LINKFLAGS=['-pg'])
if (env["ios_sim"] == "yes"): # TODO: Check if needed?
env['ENV']['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
env['ENV']['CODESIGN_ALLOCATE'] = '/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/codesign_allocate'
env.Append(CPPFLAGS=['-DIPHONE_ENABLED', '-DUNIX_ENABLED', '-DGLES2_ENABLED', '-DMPC_FIXED_POINT'])
# TODO: Move that to opus module's config
if("module_opus_enabled" in env and env["module_opus_enabled"] != "no"):
env.opus_fixed_point = "yes"
if env["arch"] == "x86":
pass
elif(env["arch"] == "arm64"):
env.Append(CFLAGS=["-DOPUS_ARM64_OPT"])
else:
env.Append(CFLAGS=["-DOPUS_ARM_OPT"])
if env['ios_exceptions'] == 'yes':
env.Append(CPPFLAGS=['-fexceptions'])
else:
env.Append(CPPFLAGS=['-fno-exceptions'])
# env['neon_enabled']=True
env['S_compiler'] = '$IPHONEPATH/Developer/usr/bin/gcc'
import methods
env.Append(BUILDERS={'GLSL120': env.Builder(action=methods.build_legacygl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL': env.Builder(action=methods.build_glsl_headers, suffix='glsl.h', src_suffix='.glsl')})
env.Append(BUILDERS={'GLSL120GLES': env.Builder(action=methods.build_gles2_headers, suffix='glsl.h', src_suffix='.glsl')})
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nimport os\nfrom PyQt5.QtCore import pyqtSignal, pyqtProperty, qDebug, pyqtSlot\n\nfrom dice.dice_extras.core_app import CoreApp\nfrom dice.dice_extras.tools.json_sync import JsonList\n\n\nclass Home(CoreApp):\n def __init__(self... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nimport os\nfrom PyQt5.QtCore import pyqtSignal, pyqtProperty, qDebug, pyqtSlot\n\nfrom dice.dice_extras.core_app import CoreApp\nfrom dice.dice_extras.tools.json_sync import JsonList\n\n\nclass Home(CoreApp):\n d... | ```python
import os
from PyQt5.QtCore import pyqtSignal, pyqtProperty, qDebug, pyqtSlot
from dice.dice_extras.core_app import CoreApp
from dice.dice_extras.tools.json_sync import JsonList
class Home(CoreApp):
def __init__(self, parent=None):
super(Home, self).__init__(parent)
settings_folder = os.path.join(os.path.expanduser("~"), ".config", "DICE")
if not os.path.exists(settings_folder):
os.makedirs(settings_folder)
self.__recent_projects = JsonList(os.path.join(settings_folder, "recent_projects.json"))
self.__max_recent_projects = 10 # TODO: get this value from settings
recent_projects_changed = pyqtSignal(name="recentProjectsChanged")
@property
def recent_projects(self):
return self.__recent_projects.to_simple_list()
recentProjects = pyqtProperty("QVariantList", fget=recent_projects.fget, notify=recent_projects_changed)
def add_recent_project(self, project_name, location):
recent_locations = [recent_project['location'] for recent_project in self.__recent_projects]
recent_project = {'projectName': project_name, 'location': location}
if location not in recent_locations:
self.__recent_projects.insert(0, recent_project)
while len(self.__recent_projects) > self.__max_recent_projects:
self.__recent_projects.pop()
self.recent_projects_changed.emit()
else:
# add the project on top of the list
index = self.__recent_projects.index(recent_project)
if index != 0:
self.__recent_projects.pop(index)
self.__recent_projects.insert(0, recent_project)
self.recent_projects_changed.emit()
@pyqtSlot(name="closeProject")
def close_project(self):
self.dice.project.close()
self.dice.desk.clear_workspace()
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python3\r\n\"\"\"\r\nTo run: python3 nb2to3.py notebook-or-directory\r\n\"\"\"\r\n# Authors: Thomas Kluyver, Fernando Perez\r\n# See: https://gist.github.com/takluyver/c8839593c615bb2f6e80\r\n\r\n# found at https://sta... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python3\r\n\"\"\"\r\nTo run: python3 nb2to3.py notebook-or-directory\r\n\"\"\"\r\n# Authors: Thomas Kluyver, Fernando Perez\r\n# See: https://gist.github.com/takluyver/c8839593c615bb2f6e80\r\n\r\n# foun... | ```python
#!/usr/bin/env python3
"""
To run: python3 nb2to3.py notebook-or-directory
"""
# Authors: Thomas Kluyver, Fernando Perez
# See: https://gist.github.com/takluyver/c8839593c615bb2f6e80
# found at https://stackoverflow.com/questions/20651502/ipython-code-migration-from-python-2-to-python-3
import argparse
import pathlib
from nbformat import read, write
import lib2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
def refactor_notebook_inplace(rt, path):
def refactor_cell(src):
#print('\n***SRC***\n', src)
try:
tree = rt.refactor_string(src+'\n', str(path) + '/cell-%d' % i)
except (lib2to3.pgen2.parse.ParseError,
lib2to3.pgen2.tokenize.TokenError):
return src
else:
return str(tree)[:-1]
print("Refactoring:", path)
nb = read(str(path), as_version=4)
# Run 2to3 on code
for i, cell in enumerate(nb.cells, start=1):
if cell.cell_type == 'code':
if cell.execution_count in (' ', '*'):
cell.execution_count = None
if cell.source.startswith('%%'):
# For cell magics, try to refactor the body, in case it's
# valid python
head, source = cell.source.split('\n', 1)
cell.source = head + '\n' + refactor_cell(source)
else:
cell.source = refactor_cell(cell.source)
# Update notebook metadata
nb.metadata.kernelspec = {
'display_name': 'Python 3',
'name': 'python3',
'language': 'python',
}
if 'language_info' in nb.metadata:
nb.metadata.language_info.codemirror_mode = {
'name': 'ipython',
'version': 3,
}
nb.metadata.language_info.pygments_lexer = 'ipython3'
nb.metadata.language_info.pop('version', None)
write(nb, str(path))
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('path', type=pathlib.Path,
help="Notebook or directory containing notebooks")
options = ap.parse_args(argv)
avail_fixes = set(get_fixers_from_package('lib2to3.fixes'))
rt = RefactoringTool(avail_fixes)
if options.path.is_dir():
for nb_path in options.path.rglob('*.ipynb'):
refactor_notebook_inplace(rt, nb_path)
else:
refactor_notebook_inplace(rt, options.path)
if __name__ == '__main__':
main()
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# encoding: utf8\nimport imaplib\nimport optparse\nimport MySQLdb\nimport memcache\nimport ldb\nimport os\nimport re\nimport subprocess\nimport sys\nfrom samba.param import LoadParm\nfrom samba.samdb import SamDB\nfrom samba.auth import system_session\n\n\nclass S... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# encoding: utf8\nimport imaplib\nimport optparse\nimport MySQLdb\nimport memcache\nimport ldb\nimport os\nimport re\nimport subprocess\nimport sys\nfrom samba.param import LoadParm\nfrom samba.samdb import SamDB\nfrom samba.auth import system_sess... | ```python
#!/usr/bin/env python
# encoding: utf8
import imaplib
import optparse
import MySQLdb
import memcache
import ldb
import os
import re
import subprocess
import sys
from samba.param import LoadParm
from samba.samdb import SamDB
from samba.auth import system_session
class SambaOCHelper(object):
def __init__(self):
self.samba_lp = LoadParm()
self.samba_lp.set('debug level', '0')
self.samba_lp.load_default()
url = self.samba_lp.get('dcerpc_mapiproxy:samdb_url') or \
self.samba_lp.private_path("sam.ldb")
self.samdb = SamDB(url=url,
lp=self.samba_lp,
session_info=system_session())
self.conn = self._open_mysql_connection()
def _open_mysql_connection(self):
connection_string = self.samba_lp.get('mapiproxy:openchangedb')
if not connection_string:
raise Exception("Not found mapiproxy:openchangedb on samba configuration")
# mysql://openchange:password@localhost/openchange
m = re.search(r'(?P<scheme>.+)://(?P<user>.+):(?P<pass>.+)@(?P<host>.+)/(?P<db>.+)',
connection_string)
if not m:
raise Exception("Unable to parse mapiproxy:openchangedb: %s" %
connection_string)
group_dict = m.groupdict()
if group_dict['scheme'] != 'mysql':
raise Exception("mapiproxy:openchangedb should start with mysql:// (we got %s)",
group_dict['scheme'])
conn = MySQLdb.connect(host=group_dict['host'], user=group_dict['user'],
passwd=group_dict['pass'], db=group_dict['db'])
conn.autocommit(True)
return conn
def invalid_user(self, username):
ret = self.samdb.search(base=self.samdb.domain_dn(),
scope=ldb.SCOPE_SUBTREE,
expression="(sAMAccountName=%s)" % ldb.binary_encode(username))
return len(ret) != 1
def find_email_of(self, username):
ret = self.samdb.search(base=self.samdb.domain_dn(),
scope=ldb.SCOPE_SUBTREE, attrs=["mail"],
expression="(sAMAccountName=%s)" % ldb.binary_encode(username))
return ret[0]["mail"][0]
def active_openchange_users(self):
c = self.conn.cursor()
c.execute("SELECT name FROM mailboxes")
return sorted([row[0] for row in c.fetchall()])
class ImapCleaner(object):
def __init__(self, dry_run=False, samba_helper=SambaOCHelper(),
imap_host=None, imap_port=None, imap_ssl=False):
self.dry_run = dry_run
self.samba_helper = samba_helper
self.imap_host = imap_host
self.imap_port = imap_port
self.imap_ssl = imap_ssl
self.system_defaults_file = "/etc/sogo/sogo.conf"
def _get_connection_url(self):
connection_url = None
# read defaults from defaults files
if os.path.exists(self.system_defaults_file):
p1 = subprocess.Popen(["sogo-tool", "dump-defaults", "-f",
self.system_defaults_file],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk", "-F\"",
"/ SOGoIMAPServer =/ {print $2}"],
stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
tmp = p2.communicate()[0]
if tmp:
connection_url = tmp
return connection_url
def _get_imap_from_sogo(self):
connection_url = self._get_connection_url()
# imap[s]://127.0.0.1:143
m = re.search('((?P<scheme>.+)://)?(?P<host>.+):(?P<port>\d+)',
connection_url)
if not m:
raise Exception("ERROR Unable to parse IMAPServer: %s" %
connection_url)
group_dict = m.groupdict()
if group_dict['scheme'] not in (None, 'imaps', 'imap'):
raise Exception("IMAPServer should start with imap[s]:// "
"(we got %s)", group_dict['scheme'])
self.imap_host = group_dict['host']
self.imap_port = group_dict['port']
self.imap_ssl = group_dict['scheme'] == 'imaps'
def cleanup(self, username, password=""):
print "===== IMAP cleanup ====="
if not self.imap_host:
self._get_imap_from_sogo()
if self.imap_ssl:
client = imaplib.IMAP4_SSL(self.imap_host, self.imap_port)
else:
client = imaplib.IMAP4(self.imap_host, self.imap_port)
if not password:
master_file = self.samba_helper.samba_lp.private_path('mapistore/master.password')
if os.path.exists(master_file):
with open(master_file) as f:
password = f.read()
else:
password = 'unknown'
try:
email = self.samba_helper.find_email_of(username)
code, data = client.login(email, password)
if code != "OK":
raise Exception("Login failure")
print " [IMAP] Logged in as %s" % email
patterns = ["*(1)*", "Spam", "Sync Issues*", "Problemas de sincroni*",
"Problèmes de synchronisation*"]
for pattern in patterns:
code, data = client.list("", pattern)
if code != "OK":
raise Exception("ERROR IMAP listing folders with pattern %s" % pattern)
if data[0] is None:
print " [IMAP] No folders with %s pattern" % pattern
else:
for d in data:
folder_name = None
m = re.match(".*\".\" \"(.*)\"", d)
if m:
folder_name = m.group(1)
if not folder_name:
m = re.match(".*\".\" (.*)", d)
if m:
folder_name = m.group(1)
if not folder_name:
raise Exception("Couldn't parse folder name on %r" % d)
if not self.dry_run:
code, data = client.delete(folder_name)
if code != "OK":
raise Exception("ERROR IMAP deleting %s" % folder_name)
print " [IMAP] Deleted %s" % folder_name
finally:
client.logout()
class MemcachedCleaner(object):
def __init__(self, samba_helper=SambaOCHelper(), dry_run=False):
self.samba_helper = samba_helper
self.dry_run = dry_run
def cleanup(self, username):
print "===== Memcached cleanup ====="
mc = self._connect_to_memcached()
if all(s.connect() == 0 for s in mc.servers):
print " [Memcached] No memcached servers"
return
keys = self._get_all_keys(mc)
if not keys:
print " [Memcached] There are no keys to delete"
return
print "WARNING: All data from memcached will be deleted"
if not self.dry_run:
for key in keys:
mc.delete(key)
print " [Memcached] Deleted %d keys" % len(keys)
def _connect_to_memcached(self):
# FIXME read from openchange conf
host = "127.0.0.1:11211"
return memcache.Client([host])
def _get_all_keys(self, mc):
keys = []
# FIXME support several memcached servers
if len(mc.servers) > 1:
print "WARNING: More than one server, you must restart them manually"
server = mc.servers[0]
slabs = mc.get_slabs()[0][1].keys()
item_re = re.compile('^ITEM (?P<key>\S+) \[\d+ b; \d+ s\]$')
for slab in slabs:
server.send_cmd("stats cachedump %s 0" % slab)
line = server.readline()
eof = False
while not eof:
m = item_re.match(line)
if m:
keys.append(m.groupdict()['key'])
line = server.readline()
eof = line == 'END'
return keys
class OpenchangeCleaner(object):
def __init__(self, samba_helper=SambaOCHelper(), dry_run=False):
self.samba_helper = samba_helper
self.dry_run = dry_run
def mapistore_indexing_cleanup(self, username):
print "===== Mapistore indexing cleanup ====="
if not self.dry_run:
c = self.samba_helper.conn.cursor()
sql = ("DELETE FROM mapistore_indexes WHERE username = '%s'" %
MySQLdb.escape_string(username))
c.execute(sql)
sql = ("DELETE FROM mapistore_indexing WHERE username = '%s'" %
MySQLdb.escape_string(username))
c.execute(sql)
c.close()
print " [OC-Indexing] Deleted indexing database for %s" % username
def openchangedb_cleanup(self, username):
print "===== OpenchangeDB cleanup ====="
if not self.dry_run:
c = self.samba_helper.conn.cursor()
sql = ("DELETE FROM mailboxes WHERE name = '%s'" %
MySQLdb.escape_string(username))
c.execute(sql)
c.close()
print " [OC-OpenchangeDB] Deleted openchangedb for %s" % username
def cleanup(self, username):
self.openchangedb_cleanup(username)
self.mapistore_indexing_cleanup(username)
class SOGoCleaner(object):
def __init__(self, dry_run=False, samba_helper=SambaOCHelper()):
self.dry_run = dry_run
self.samba_helper = samba_helper
self.system_defaults_file = "/etc/sogo/sogo.conf"
self.user_defaults_file = os.path.expanduser("~sogo/GNUstep/Defaults/.GNUstepDefaults")
def _as_css_id(self, input_string):
css_char_map = {"_" : "_U_",
"." : "_D_",
"#" : "_H_",
"@" : "_A_",
"*" : "_S_",
":" : "_C_",
"," : "_CO_",
" " : "_SP_",
"'" : "_SQ_",
"&" : "_AM_",
"+" : "_P_"}
new_chars = []
for c in input_string:
if c in css_char_map:
new_chars.append(css_char_map[c])
else:
new_chars.append(c)
return "".join(new_chars)
def sogo_mysql_cleanup(self, dbhost, dbport, dbuser, dbpass, dbname, username):
conn = MySQLdb.connect(host=dbhost, port=int(dbport), user=dbuser, passwd=dbpass,
db=dbname)
c = conn.cursor()
tablename = "sogo_cache_folder_%s" % self._as_css_id(username)
if not self.dry_run:
c.execute("DROP TABLE %s" % tablename)
print " [SOGo MySQL] Table %s deleted" % tablename
def _get_connection_url(self):
connection_url = None
# read defaults from defaults files
# order is important, user defaults must have precedence
for f in [self.system_defaults_file, self.user_defaults_file]:
if os.path.exists(f):
p1 = subprocess.Popen(["sogo-tool", "dump-defaults", "-f", f],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk", "-F\"", "/ OCSFolderInfoURL =/ {print $2}"],
stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
tmp = p2.communicate()[0]
if tmp:
connection_url = tmp
return connection_url
def cleanup(self, username):
print "===== SOGo cleanup ====="
connection_url = self._get_connection_url()
if connection_url is None:
raise Exception("Couldn't fetch OCSFolderInfoURL")
# mysql://sogo:sogo@127.0.0.1:5432/sogo/sogo_folder_info
m = re.search('(?P<scheme>.+)://(?P<user>.+):(?P<pass>.+)@'
'(?P<host>.+):(?P<port>\d+)/(?P<db>.+)/(?P<table>.+)',
connection_url)
if not m:
raise Exception("ERROR Unable to parse OCSFolderInfoURL: %s" %
connection_url)
group_dict = m.groupdict()
if group_dict['scheme'] != 'mysql':
raise Exception("OCSFolderInfoURL should start with mysql:// "
"(we got %s)", group_dict['scheme'])
self.sogo_mysql_cleanup(group_dict['host'], group_dict['port'],
group_dict['user'], group_dict['pass'],
group_dict['db'], username)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
def cleanup(username, samba_helper, ignore=[], dry_run=False):
for klass in (OpenchangeCleaner, SOGoCleaner, ImapCleaner, MemcachedCleaner):
if klass.__name__.split('Cleaner')[0].lower() in ignore:
continue
cleaner = klass(dry_run=dry_run, samba_helper=samba_helper)
try:
cleaner.cleanup(username)
except Exception as e:
print "Error cleaning up with %s: %s" % (str(klass), str(e))
def list_users(samba_helper):
for user in sorted(samba_helper.active_openchange_users()):
print user
parser = optparse.OptionParser("%s [options] <username>" % os.path.basename(sys.argv[0]))
parser.add_option("--users", action="store_true", help="List active openchange users")
parser.add_option("--dry-run", action="store_true", help="Do not perform any action")
parser.add_option("--ignore", action="append", default=[], help=("Ignore to perform "
"some cleaner actions. The ones that exist are: openchange, sogo, "
"imap, memcached"))
opts, args = parser.parse_args()
samba_helper = SambaOCHelper()
if opts.users:
list_users(samba_helper)
sys.exit(0)
elif len(args) != 1:
parser.print_help()
sys.exit(1)
username = args[0]
if samba_helper.invalid_user(username):
print "User %s doesn't exist on samba" % username
sys.exit(2)
if username not in samba_helper.active_openchange_users():
print "User %s is not an active openchange user" % username
sys.exit(2)
cleanup(username, samba_helper, opts.ignore, opts.dry_run)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id: ogr_couchdb.py 33793 2016-03-26 13:02:07Z goatbar $\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: CouchDB driver test... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id: ogr_couchdb.py 33793 2016-03-26 13:02:07Z goatbar $\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Cou... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_couchdb.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: CouchDB driver testing.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
###############################################################################
# Test if driver is available
def ogr_couchdb_init():
ogrtest.couchdb_drv = None
try:
ogrtest.couchdb_drv = ogr.GetDriverByName('CouchDB')
except:
pass
if ogrtest.couchdb_drv is None:
return 'skip'
if 'COUCHDB_TEST_SERVER' in os.environ:
ogrtest.couchdb_test_server = os.environ['COUCHDB_TEST_SERVER']
else:
ogrtest.couchdb_test_server = 'http://gdalautotest.iriscouch.com'
ogrtest.couchdb_test_layer = 'poly'
if gdaltest.gdalurlopen(ogrtest.couchdb_test_server) is None:
print('cannot open %s' % ogrtest.couchdb_test_server)
ogrtest.couchdb_drv = None
return 'skip'
return 'success'
###############################################################################
# Test GetFeatureCount()
def ogr_couchdb_GetFeatureCount():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
count = lyr.GetFeatureCount()
if count != 10:
gdaltest.post_reason('did not get expected feature count')
print(count)
return 'fail'
return 'success'
###############################################################################
# Test GetNextFeature()
def ogr_couchdb_GetNextFeature():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
if feat.GetField('EAS_ID') != 168:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test GetFeature()
def ogr_couchdb_GetFeature():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
feat = lyr.GetFeature(0)
if feat is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
if feat.GetField('EAS_ID') != 168:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test GetSpatialRef()
def ogr_couchdb_GetSpatialRef():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
sr = lyr.GetSpatialRef()
if ogrtest.couchdb_test_layer == 'poly_nongeojson':
if sr is not None:
gdaltest.post_reason('got a srs but did not expect one')
return 'fail'
return 'success'
if sr is None:
gdaltest.post_reason('did not get expected srs')
return 'fail'
txt = sr.ExportToWkt()
if txt.find('OSGB') == -1:
gdaltest.post_reason('did not get expected srs')
print(txt)
return 'fail'
return 'success'
###############################################################################
# Test GetExtent()
def ogr_couchdb_GetExtent():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
extent = lyr.GetExtent()
if extent is None:
gdaltest.post_reason('did not get expected extent')
return 'fail'
if extent != (478315.53125, 481645.3125, 4762880.5, 4765610.5):
gdaltest.post_reason('did not get expected extent')
print(extent)
return 'fail'
return 'success'
###############################################################################
# Test SetSpatialFilter()
def ogr_couchdb_SetSpatialFilter():
if ogrtest.couchdb_drv is None:
return 'skip'
if not ogrtest.have_geos():
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
lyr.SetSpatialFilterRect( 479647, 4764856.5, 480389.6875, 4765610.5 )
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
if feat.GetField('EAS_ID') != 168:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
count = 0
while feat is not None:
count = count + 1
feat = lyr.GetNextFeature()
if count != 5:
gdaltest.post_reason('did not get expected feature count (1)')
print(count)
return 'fail'
count = lyr.GetFeatureCount()
if count != 5:
gdaltest.post_reason('did not get expected feature count (2)')
print(count)
return 'fail'
return 'success'
###############################################################################
# Test SetAttributeFilter()
def ogr_couchdb_SetAttributeFilter():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
lyr.SetAttributeFilter( 'EAS_ID = 170' )
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
if feat.GetField('EAS_ID') != 170:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
count = 0
while feat is not None:
count = count + 1
feat = lyr.GetNextFeature()
if count != 1:
gdaltest.post_reason('did not get expected feature count (1)')
print(count)
return 'fail'
count = lyr.GetFeatureCount()
if count != 1:
gdaltest.post_reason('did not get expected feature count (2)')
print(count)
return 'fail'
return 'success'
###############################################################################
# Test ExecuteSQLStats()
def ogr_couchdb_ExecuteSQLStats():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/%s' % (ogrtest.couchdb_test_server, ogrtest.couchdb_test_layer))
if ds is None:
return 'fail'
lyr = ds.ExecuteSQL('SELECT MIN(EAS_ID), MAX(EAS_ID), AVG(EAS_ID), SUM(EAS_ID), COUNT(*) FROM POLY')
feat = lyr.GetNextFeature()
if feat.GetField('MIN_EAS_ID') != 158 or \
feat.GetField('MAX_EAS_ID') != 179 or \
feat.GetField('AVG_EAS_ID') != 169.1 or \
feat.GetField('SUM_EAS_ID') != 1691 or \
feat.GetField('COUNT_*') != 10:
gdaltest.post_reason('did not get expected values')
feat.DumpReadable()
return 'fail'
ds.ReleaseResultSet(lyr)
return 'success'
###############################################################################
# Test a row layer()
def ogr_couchdb_RowLayer():
if ogrtest.couchdb_drv is None:
return 'skip'
ds = ogr.Open('couchdb:%s/poly/_design/ogr_filter_EAS_ID/_view/filter?include_docs=true' % ogrtest.couchdb_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
feat = lyr.GetFeature(0)
if feat is None:
gdaltest.post_reason('did not get expected feature')
return 'fail'
if feat.GetField('EAS_ID') != 168:
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# ogr_couchdb_changeLayer
def ogr_couchdb_changeLayer():
ogrtest.couchdb_test_layer = 'poly_nongeojson'
return 'success'
# CouchDB tests fail in unreliable ways on Travis
if gdaltest.skip_on_travis():
gdaltest_list = []
else:
gdaltest_list = [
ogr_couchdb_init,
ogr_couchdb_GetFeatureCount,
ogr_couchdb_GetNextFeature,
ogr_couchdb_GetFeature,
ogr_couchdb_GetSpatialRef,
ogr_couchdb_GetExtent,
ogr_couchdb_SetSpatialFilter,
ogr_couchdb_SetAttributeFilter,
ogr_couchdb_ExecuteSQLStats,
ogr_couchdb_RowLayer,
ogr_couchdb_changeLayer,
ogr_couchdb_GetFeatureCount,
ogr_couchdb_GetNextFeature,
ogr_couchdb_GetFeature,
ogr_couchdb_GetSpatialRef,
ogr_couchdb_GetExtent,
ogr_couchdb_SetSpatialFilter,
ogr_couchdb_SetAttributeFilter
]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_couchdb' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
``` |
[
{
"content": "Replicate the source code:\n```python\nimport unittest\n\nfrom dart.client.python.dart_client import Dart\nfrom dart.engine.no_op.metadata import NoOpActionTypes\nfrom dart.model.action import ActionData, Action, ActionState\nfrom dart.model.datastore import Datastore, DatastoreData, DatastoreStat... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport unittest\n\nfrom dart.client.python.dart_client import Dart\nfrom dart.engine.no_op.metadata import NoOpActionTypes\nfrom dart.model.action import ActionData, Action, ActionState\nfrom dart.model.datastore import Datastore, DatastoreDat... | ```python
import unittest
from dart.client.python.dart_client import Dart
from dart.engine.no_op.metadata import NoOpActionTypes
from dart.model.action import ActionData, Action, ActionState
from dart.model.datastore import Datastore, DatastoreData, DatastoreState
from dart.model.trigger import Trigger, TriggerState
from dart.model.trigger import TriggerData
from dart.model.workflow import WorkflowData, WorkflowState, WorkflowInstanceState
from dart.model.workflow import Workflow
class TestWorkflowChaining(unittest.TestCase):
def setUp(self):
dart = Dart(host='localhost', port=5000)
""" :type dart: dart.client.python.dart_client.Dart """
self.dart = dart
dst_args = {'action_sleep_time_in_seconds': 0}
dst0 = Datastore(data=DatastoreData('test-datastore0', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE))
self.datastore0 = self.dart.save_datastore(dst0)
dst1 = Datastore(data=DatastoreData('test-datastore1', 'no_op_engine', args=dst_args, state=DatastoreState.TEMPLATE))
self.datastore1 = self.dart.save_datastore(dst1)
wf0 = Workflow(data=WorkflowData('test-workflow0', self.datastore0.id, state=WorkflowState.ACTIVE))
self.workflow0 = self.dart.save_workflow(wf0, self.datastore0.id)
wf1 = Workflow(data=WorkflowData('test-workflow1', self.datastore1.id, state=WorkflowState.ACTIVE))
self.workflow1 = self.dart.save_workflow(wf1, self.datastore1.id)
a00 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
a01 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
self.action00, self.action01 = self.dart.save_actions([a00, a01], workflow_id=self.workflow0.id)
a10 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
a11 = Action(data=ActionData(NoOpActionTypes.action_that_succeeds.name, NoOpActionTypes.action_that_succeeds.name, state=ActionState.TEMPLATE))
self.action10, self.action11 = self.dart.save_actions([a10, a11], workflow_id=self.workflow1.id)
tr_args = {'completed_workflow_id': self.workflow0.id}
tr = Trigger(data=TriggerData('test-trigger', 'workflow_completion', [self.workflow1.id], tr_args, TriggerState.ACTIVE))
self.trigger = self.dart.save_trigger(tr)
def tearDown(self):
for a in self.dart.get_actions(workflow_id=self.workflow0.id):
self.dart.delete_action(a.id)
for a in self.dart.get_actions(workflow_id=self.workflow1.id):
self.dart.delete_action(a.id)
for wfi in self.dart.get_workflow_instances(self.workflow0.id):
self.dart.delete_datastore(wfi.data.datastore_id)
for wfi in self.dart.get_workflow_instances(self.workflow1.id):
self.dart.delete_datastore(wfi.data.datastore_id)
self.dart.delete_trigger(self.trigger.id)
self.dart.delete_workflow_instances(self.workflow0.id)
self.dart.delete_workflow_instances(self.workflow1.id)
self.dart.delete_workflow(self.workflow0.id)
self.dart.delete_workflow(self.workflow1.id)
self.dart.delete_datastore(self.datastore0.id)
self.dart.delete_datastore(self.datastore1.id)
def test_workflow_chaining(self):
self.dart.manually_trigger_workflow(self.workflow0.id)
wf_instances = self.dart.await_workflow_completion(self.workflow1.id)
for wfi in wf_instances:
self.assertEqual(wfi.data.state, WorkflowInstanceState.COMPLETED)
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nimport cointipbot, traceback, time\n\nclass Main():\n cb = None\n\n def __init__(self):\n '''\n Unfortunately, we cannot instantiate CointipBot only once due to the way pifkoin works. \n\n Pifkoin will open a... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nimport cointipbot, traceback, time\n\nclass Main():\n cb = None\n\n def __init__(self):\n '''\n Unfortunately, we cannot instantiate CointipBot only once due to the way pifkoin works. \n\n Pif... | ```python
import cointipbot, traceback, time
class Main():
cb = None
def __init__(self):
'''
Unfortunately, we cannot instantiate CointipBot only once due to the way pifkoin works.
Pifkoin will open an HTTP connection to the RPC client which will eventually close when the bot completes its checks. Therefore, if we try to loop
CointipBot.main() without reinstanting the object, httplib will throw BadStatusLine because the connection is no longer valid and cannot be used.
May make a pull request to pifkoin to resolve this. If this does get resolved in pifkoin or you locally modify your pifkoin copy to resolve this, you can
uncomment the following line and comment the self.cb assignment in main(). This will ensure we do not need to reinstantiate CoinbotTip in every iteration.
'''
# self.cb = cointipbot.CointipBot()
def main(self):
self.cb = cointipbot.CointipBot()
self.cb.main()
def secondary(main):
try:
while True:
main.main();
except:
traceback.print_exc()
print('Resuming in 7 seconds')
time.sleep(7)
print('Resumed')
while True:
main = Main()
secondary(main)
``` |
[
{
"content": "Write the code verbatim:\n```python\nfrom enigma import eRCInput, getPrevAsciiCode\nfrom Screens.Screen import Screen\nfrom Screens.MessageBox import MessageBox\nfrom Screens.VirtualKeyBoard import VirtualKeyBoard\nfrom Components.ActionMap import NumberActionMap\nfrom Components.Label import Labe... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nfrom enigma import eRCInput, getPrevAsciiCode\nfrom Screens.Screen import Screen\nfrom Screens.MessageBox import MessageBox\nfrom Screens.VirtualKeyBoard import VirtualKeyBoard\nfrom Components.ActionMap import NumberActionMap\nfrom Components.L... | ```python
from enigma import eRCInput, getPrevAsciiCode
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.Input import Input
from Components.Pixmap import Pixmap
from Tools.BoundFunction import boundFunction
from Tools.Notifications import AddPopup
from time import time
from Components.config import config
class InputBox(Screen):
def __init__(self, session, title = "", windowTitle = _("Input"), useableChars = None, **kwargs):
Screen.__init__(self, session)
self["text"] = Label(title)
self["input"] = Input(**kwargs)
self["VKeyIcon"] = Pixmap()
self["help_text"] = Label(_("use virtual keyboard for text input"))
self.onShown.append(boundFunction(self.setTitle, windowTitle))
if useableChars is not None:
self["input"].setUseableChars(useableChars)
self["actions"] = NumberActionMap(["WizardActions", "InputBoxActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.gotAsciiCode,
"ok": self.go,
"back": self.cancel,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"tab": self.keyTab,
"toggleOverwrite": self.keyInsert,
"showVirtualKeyboard": self.virtualKeyBoard,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
if self["input"].type == Input.TEXT:
if config.misc.remotecontrol_text_support.value:
self.onExecBegin.append(self.setKeyboardModeNone)
else:
self.onExecBegin.append(self.setKeyboardModeAscii)
else:
self.onExecBegin.append(self.setKeyboardModeNone)
def virtualKeyBoard(self):
self.input_text = self["input"].getText()
input_title = self["text"].getText()
self.session.openWithCallback(self.virtualKeyBoardCB, VirtualKeyBoard, title = input_title, text = self.input_text)
def virtualKeyBoardCB(self, res):
if res:
self.input_text = res
self["input"].setText(self.input_text)
self["input"].end()
def gotAsciiCode(self):
self["input"].handleAscii(getPrevAsciiCode())
def keyLeft(self):
self["input"].left()
def keyRight(self):
self["input"].right()
def keyNumberGlobal(self, number):
self["input"].number(number)
def keyDelete(self):
self["input"].delete()
def go(self):
self.close(self["input"].getText())
def cancel(self):
self.close(None)
def keyHome(self):
self["input"].home()
def keyEnd(self):
self["input"].end()
def keyBackspace(self):
self["input"].deleteBackward()
def keyTab(self):
self["input"].tab()
def keyInsert(self):
self["input"].toggleOverwrite()
class PinInput(InputBox):
def __init__(self, session, service = "", triesEntry = None, pinList = [], popup = False, simple=True, *args, **kwargs):
InputBox.__init__(self, session = session, text = " ", maxSize = True, type = Input.PIN, *args, **kwargs)
self.waitTime = 15
self.triesEntry = triesEntry
self.pinList = pinList
self["service"] = Label(service)
if service and simple:
self.skinName = "PinInputPopup"
if self.getTries() == 0:
if (self.triesEntry.time.value + (self.waitTime * 60)) > time():
remaining = (self.triesEntry.time.value + (self.waitTime * 60)) - time()
remainingMinutes = int(remaining / 60)
remainingSeconds = int(remaining % 60)
messageText = _("You have to wait %s!") % (str(remainingMinutes) + " " + _("minutes") + ", " + str(remainingSeconds) + " " + _("seconds"))
if service and simple:
AddPopup(messageText, type = MessageBox.TYPE_ERROR, timeout = 3)
self.closePinCancel()
else:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.closePinCancel, MessageBox, messageText, MessageBox.TYPE_ERROR, timeout = 3))
else:
self.setTries(3)
self["tries"] = Label("")
self.onShown.append(self.showTries)
def gotAsciiCode(self):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.gotAsciiCode(self)
self.go()
else:
InputBox.gotAsciiCode(self)
def keyNumberGlobal(self, number):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.keyNumberGlobal(self, number)
self.go()
else:
InputBox.keyNumberGlobal(self, number)
def checkPin(self, pin):
if pin is not None and " " not in pin and int(pin) in self.pinList:
return True
return False
def go(self):
if self.pinList:
self.triesEntry.time.value = int(time())
self.triesEntry.time.save()
if self.checkPin(self["input"].getText()):
self.setTries(3)
self.closePinCorrect()
else:
self.keyHome()
self.decTries()
if self.getTries() == 0:
self.closePinWrong()
else:
pin = self["input"].getText()
if pin and pin.isdigit():
self.close(int(pin))
else:
self.close(None)
def closePinWrong(self, *args):
print "args:", args
self.close(False)
def closePinCorrect(self, *args):
self.setTries(3)
self.close(True)
def closePinCancel(self, *args):
self.close(None)
def cancel(self):
self.closePinCancel()
def getTries(self):
return self.triesEntry and self.triesEntry.tries.value
def decTries(self):
self.setTries(self.triesEntry.tries.value - 1)
self.showTries()
def setTries(self, tries):
self.triesEntry.tries.value = tries
self.triesEntry.tries.save()
def showTries(self):
self["tries"].setText(self.triesEntry and _("Tries left:") + " " + str(self.getTries() or ""))
def keyRight(self):
pass
``` |
[
{
"content": "Here is a code snippet:\n```python\n# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n \n def forwards(self, orm):\n \n # Adding model 'Classifier'\n db.cr... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n \n def forwards(self, orm):\n \n # Adding model 'Classifier... | ```python
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Classifier'
db.create_table('userpypi_classifier', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('userpypi', ['Classifier'])
# Adding model 'Project'
db.create_table('userpypi_project', (
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('license', self.gf('django.db.models.fields.TextField')(blank=True)),
('metadata_version', self.gf('django.db.models.fields.CharField')(default=1.0, max_length=64)),
('author', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('home_page', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('download_url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('summary', self.gf('django.db.models.fields.TextField')(blank=True)),
('author_email', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', to=orm['auth.User'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
))
db.send_create_signal('userpypi', ['Project'])
# Adding M2M table for field classifiers on 'Project'
db.create_table('userpypi_project_classifiers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['userpypi.project'], null=False)),
('classifier', models.ForeignKey(orm['userpypi.classifier'], null=False))
))
db.create_unique('userpypi_project_classifiers', ['project_id', 'classifier_id'])
# Adding model 'Release'
db.create_table('userpypi_release', (
('upload_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('md5_digest', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('filetype', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('pyversion', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='releases', to=orm['userpypi.Project'])),
('platform', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('version', self.gf('django.db.models.fields.CharField')(max_length=128)),
('signature', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
('distribution', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('userpypi', ['Release'])
# Adding unique constraint on 'Release', fields ['project', 'version', 'platform', 'distribution', 'pyversion']
db.create_unique('userpypi_release', ['project_id', 'version', 'platform', 'distribution', 'pyversion'])
def backwards(self, orm):
# Deleting model 'Classifier'
db.delete_table('userpypi_classifier')
# Deleting model 'Project'
db.delete_table('userpypi_project')
# Removing M2M table for field classifiers on 'Project'
db.delete_table('userpypi_project_classifiers')
# Deleting model 'Release'
db.delete_table('userpypi_release')
# Removing unique constraint on 'Release', fields ['project', 'version', 'platform', 'distribution', 'pyversion']
db.delete_unique('userpypi_release', ['project_id', 'version', 'platform', 'distribution', 'pyversion'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'userpypi.classifier': {
'Meta': {'object_name': 'Classifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'userpypi.project': {
'Meta': {'object_name': 'Project'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'author_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['userpypi.Classifier']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'metadata_version': ('django.db.models.fields.CharField', [], {'default': '1.0', 'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'userpypi.release': {
'Meta': {'unique_together': "(('project', 'version', 'platform', 'distribution', 'pyversion'),)", 'object_name': 'Release'},
'distribution': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5_digest': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['userpypi.Project']"}),
'pyversion': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'signature': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'upload_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['userpypi']
``` |
[
{
"content": "Replicate the source code:\n```python\nfrom typing import Dict\n\nfrom Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DifferentialValueBase import DifferentialValueBase\nfrom Src.Core.Entity.EntityBase import EntityBase\n\n\nclass DifferentialMicroRnaExpressionLevelValue(DifferentialValueB... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nfrom typing import Dict\n\nfrom Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DifferentialValueBase import DifferentialValueBase\nfrom Src.Core.Entity.EntityBase import EntityBase\n\n\nclass DifferentialMicroRnaExpressionLevelValue(Di... | ```python
from typing import Dict
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DifferentialValueBase import DifferentialValueBase
from Src.Core.Entity.EntityBase import EntityBase
class DifferentialMicroRnaExpressionLevelValue(DifferentialValueBase, EntityBase):
"""description of class"""
def __init__(self, **kargs):
"""
:param kargs:
"""
super().__init__(**kargs)
self.__symbol = kargs.get('symbol', None)
def __hash__(self):
return hash(self.symbol)
def __eq__(self, other):
return isinstance(other, DifferentialMicroRnaExpressionLevelValue) and \
self.symbol == other.symbol
@property
def symbol(self) -> int:
"""description of property"""
return self.__symbol
@symbol.setter
def symbol(self, value: int):
"""
:param value:
:return:
"""
self.__symbol = value
def validate(self):
super().validate()
if not self.__symbol:
raise ValueError('symbol is required.')
def as_dict(self)-> Dict:
diff_dict = super().as_dict()
diff_dict.update({'symbol': self.__symbol})
return diff_dict
``` |
[
{
"content": "```python\n\"\"\"The description for an FMOD Studio Event.\"\"\"\n\nfrom ctypes import byref, c_int, c_void_p, create_string_buffer\n\nfrom .event_instance import EventInstance\nfrom .studio_object import StudioObject\nfrom .enums import LOADING_STATE\n\nclass EventDescription(StudioObject):\n ... | [
{
"content": "<|memory_start|>```python\n\"\"\"The description for an FMOD Studio Event.\"\"\"\n\nfrom ctypes import byref, c_int, c_void_p, create_string_buffer\n\nfrom .event_instance import EventInstance\nfrom .studio_object import StudioObject\nfrom .enums import LOADING_STATE\n\nclass EventDescription(Stud... | ```python
"""The description for an FMOD Studio Event."""
from ctypes import byref, c_int, c_void_p, create_string_buffer
from .event_instance import EventInstance
from .studio_object import StudioObject
from .enums import LOADING_STATE
class EventDescription(StudioObject):
"""The description for an FMOD Studio Event.
Event descriptions belong to banks and can be queried after the relevant
bank has been loaded. Event descriptions may be retrieved via path or GUID
lookup, or by enumerating all descriptions in a bank.
"""
function_prefix = "FMOD_Studio_EventDescription"
@property
def path(self):
"""The path."""
required = c_int()
self._call("GetPath", None, 0, byref(required))
path_buffer = create_string_buffer(required.value)
self._call("GetPath", path_buffer, len(path_buffer), None)
return path_buffer.value.decode("utf-8")
def create_instance(self):
"""Create a playable instance."""
instance_ptr = c_void_p()
self._call("CreateInstance", byref(instance_ptr))
return EventInstance(instance_ptr)
@property
def parameter_description_count(self):
"""The number of parameters in the event."""
count = c_int()
self._call("GetParameterDescriptionCount", byref(count))
return count.value
@property
def user_property_count(self):
"""The number of user properties attached to the event."""
count = c_int()
self._call("GetUserPropertyCount", byref(count))
return count.value
def load_sample_data(self):
"""Loads non-streaming sample data used by the event."""
self._call("LoadSampleData")
@property
def sample_loading_state(self):
"""Retrieves the sample data loading state."""
state = c_int()
self._call("GetSampleLoadingState", byref(state))
return LOADING_STATE(state.value)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n#\n# matrigram documentation build configuration file, created by\n# sphinx-quickstart on Tue Dec 20 11:09:38 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# matrigram documentation build configuration file, created by\n# sphinx-quickstart on Tue Dec 20 11:09:38 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configura... | ```python
# -*- coding: utf-8 -*-
#
# matrigram documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 20 11:09:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../matrigram'))
sys.path.insert(0, os.path.abspath('../'))
# NOTE(review): both paths are prepended; the second insert ends up first,
# so the repository root takes precedence when autodoc imports 'matrigram'.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'matrigram'
copyright = u'2016, Gal Pressman & Yuval Fatael'
author = u'Gal Pressman & Yuval Fatael'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'github_user': 'GalPressman',
    'github_repo': 'matrigram',
    'github_banner': True,
    'github_button': True,
    'travis_button': True,
    'show_powered_by': False,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'matrigram v0.0.1'
# A shorter title for the navigation bar.  Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'logo.jpg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
html_sidebars = {
    '**': [
        'about.html',
        'badges.html',
        'navigation.html',
        'searchbox.html',
    ]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'matrigramdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'matrigram.tex', u'matrigram Documentation',
     u'Gal Pressman \\& Yuval Fatael', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, 	itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'matrigram', u'matrigram Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'matrigram', u'matrigram Documentation',
     author, 'matrigram', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
``` |
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/env python2.3\n\n# Fix one or all canary records by replacing all medline metadata\n# with results of a fresh pubmed query.\n#\n# $Id$\n\nfrom optparse import OptionParser\nimport pprint\nimport time\n\nimport canary.context\nfrom canary.loader import Pars... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/env python2.3\n\n# Fix one or all canary records by replacing all medline metadata\n# with results of a fresh pubmed query.\n#\n# $Id$\n\nfrom optparse import OptionParser\nimport pprint\nimport time\n\nimport canary.context\nfrom canary.lo... | ```python
#!/usr/bin/env python2.3
# Fix one or all canary records by replacing all medline metadata
# with results of a fresh pubmed query.
#
# $Id$
from optparse import OptionParser
import pprint
import time
import canary.context
from canary.loader import Parser, QueuedRecord
from canary.search import PubmedSearch
from canary.study import Study
if __name__ == '__main__':
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option('-i', '--canary-identifier',
dest='canary_id', default=0,
help='specific canary id to fetch')
parser.add_option('-c', '--config',
dest='config', default='conf/canary_config.py',
help='path to configuration file')
parser.add_option('-u', '--update',
action='store_true', dest='update', default=False,
help='save updated data to the database')
(options, args) = parser.parse_args()
context = canary.context.Context()
cursor = context.get_cursor()
# get a complete mapping
source_catalog = context.get_source_catalog()
complete_mapping = source_catalog.get_complete_mapping()
# pubmed-medline is source 13
pubmed_source = source_catalog.get_source(13)
pubmed_search = PubmedSearch()
if options.canary_id:
rows = [[options.canary_id,],]
else:
# get all active queuedrecord ids
cursor.execute("""
SELECT uid
FROM queued_records
""")
rows = cursor.fetchall()
parser = Parser(pubmed_source)
for row in rows:
qr = QueuedRecord(context, row[0])
print 'Fetching pubmed data for ui %s' % qr.unique_identifier
pm_data = pubmed_search.fetch(qr.unique_identifier)
fetched_records = parser.parse(mapped_terms=complete_mapping,
is_email=False, data=pm_data)
if len(fetched_records) != 1:
print 'Fetch for %s (%s) found %s records, ignoring' % (ui,
qr.uid, len(fetched_records))
else:
print 'Orig metadata:', qr.metadata
fetched_rec = fetched_records[0]
print 'Fetched metadata:', fetched_rec.metadata
fetched_rec_metadata = fetched_rec.get_mapped_metadata(complete_mapping)
print 'Fetched metadata, mapped:', fetched_rec_metadata
if options.update:
print 'Updating.'
qr.metadata = fetched_rec.metadata
qr.save(context)
# It is a condition of Entrez eutilities to wait 3s bet. requests
time.sleep(3)
``` |
[
{
"content": "Replicate the source code:\n```python\n# Copyright 1998-2007 Gentoo Foundation\n# Distributed under the terms of the GNU General Public License v2\n\nimport codecs\nimport errno\nimport sys\nfrom portage import os\nfrom portage import _encodings\nfrom portage import _unicode_encode\nfrom portage.t... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# Copyright 1998-2007 Gentoo Foundation\n# Distributed under the terms of the GNU General Public License v2\n\nimport codecs\nimport errno\nimport sys\nfrom portage import os\nfrom portage import _encodings\nfrom portage import _unicode_encode... | ```python
# Copyright 1998-2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import codecs
import errno
import sys
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.tests import TestCase
class SpawnTestCase(TestCase):
	"""Tests for portage spawn() logfile capture."""

	def testLogfile(self):
		"""spawn() with logfile= must record the child's output verbatim.

		Echoes a known string with stdout/stderr redirected to /dev/null
		and a logfile attached, then asserts the logfile content equals
		the echoed string.
		"""
		from portage import settings, spawn
		from tempfile import mkstemp
		logfile = None
		null_fd = None
		f = None
		try:
			fd, logfile = mkstemp()
			os.close(fd)
			null_fd = os.open('/dev/null', os.O_RDWR)
			test_string = 2 * "blah blah blah\n"
			# Test cases are unique because they run inside src_test() which
			# may or may not already be running within a sandbox. Interaction
			# with SANDBOX_* variables may trigger unwanted sandbox violations
			# that are only reproducible with certain combinations of sandbox,
			# usersandbox, and userpriv FEATURES. Attempts to filter SANDBOX_*
			# variables can interfere with a currently running sandbox
			# instance. Therefore, use free=1 here to avoid potential
			# interactions (see bug #190268).
			spawn("echo -n '%s'" % test_string, settings, logfile=logfile,
				free=1, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd})
			os.close(null_fd)
			null_fd = None
			f = codecs.open(_unicode_encode(logfile,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='strict')
			log_content = f.read()
			f.close()
			f = None
			# When logging passes through a pty, this comparison will fail
			# unless the oflag terminal attributes have the termios.OPOST
			# bit disabled. Otherwise, tranformations such as \n -> \r\n
			# may occur.
			self.assertEqual(test_string, log_content)
		finally:
			# BUG FIX: the original leaked null_fd and the codecs stream when
			# spawn(), the read, or the assertion failed before the explicit
			# closes; release them here unconditionally.
			if f is not None:
				f.close()
			if null_fd is not None:
				os.close(null_fd)
			if logfile:
				try:
					os.unlink(logfile)
				except EnvironmentError as e:
					if e.errno != errno.ENOENT:
						raise
					del e
``` |
[
{
"content": "Repeat the following code:\n```python\nfrom flask import Flask, render_template, json, request, redirect, session\nfrom flask.ext.mysql import MySQL\nfrom werkzeug import generate_password_hash, check_password_hash\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps import Map\nimport ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nfrom flask import Flask, render_template, json, request, redirect, session\nfrom flask.ext.mysql import MySQL\nfrom werkzeug import generate_password_hash, check_password_hash\nfrom flask_googlemaps import GoogleMaps\nfrom flask_googlemaps imp... | ```python
from flask import Flask, render_template, json, request, redirect, session
from flask.ext.mysql import MySQL
from werkzeug import generate_password_hash, check_password_hash
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
import pygal
from pygal.style import BlueStyle, NeonStyle,DarkSolarizedStyle, LightSolarizedStyle, LightColorizedStyle, DarkColorizedStyle, TurquoiseStyle
# Application and extension wiring.
app = Flask(__name__)
GoogleMaps(app)
# NOTE(review): the session secret and DB credentials below are hard-coded
# in source; they should be loaded from configuration/environment instead.
app.secret_key = 'ssh...Big secret!'
#MySQL configurations
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'tekken5'
app.config['MYSQL_DATABASE_DB'] = 'safelocality'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
# route to index.html
@app.route("/")
def main():
    """Render the landing page; pass the session through when logged in."""
    if not session.get('user'):
        return render_template('home.html')
    return render_template('home.html', session=session)
# route to signup.html
@app.route('/showSignUp')
def showSignUp():
    """Serve the account-registration form."""
    return render_template('signup.html')
# interact with MySQL for sign up
@app.route('/signUp',methods=['POST'])
def signUp():
    """Create a user account from the posted signup form.

    Hashes the password and calls the sp_createUser stored procedure.
    Redirects to the signin page on success; otherwise returns a JSON
    error payload.
    """
    # Pre-bind so the finally block can close safely on every path.
    conn = None
    cursor = None
    try:
        _name = request.form['inputName']
        _email = request.form['inputEmail']
        _password = request.form['inputPassword']
        _firstname = request.form['inputFirstName']
        _lastname = request.form['inputLastName']

        # validate the received values
        if _name and _email and _password:
            # All Good, let's call MySQL
            conn = mysql.connect()
            cursor = conn.cursor()
            _hashed_password = generate_password_hash(_password)
            cursor.callproc('sp_createUser',(_name,_firstname,_lastname,_email,_hashed_password))
            data = cursor.fetchall()

            # BUG FIX: was 'len(data) is 0' -- an identity comparison with an
            # int literal; value equality is what is meant.
            if len(data) == 0:
                conn.commit()
                return redirect('/showSignin')
            else:
                return json.dumps({'error':str(data[0])})
        else:
            return json.dumps({'html':'<span>Enter the required fields</span>'})
    except Exception as e:
        return json.dumps({'error':str(e)})
    finally:
        # BUG FIX: the original unconditionally closed cursor/conn here and
        # raised NameError whenever validation failed (or connect() raised)
        # before they were assigned.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
@app.route('/showSignin')
def showSignin():
    """Serve the login form."""
    return render_template('signin.html')
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
    """Check the posted credentials and start a session on success.

    Looks the user up via sp_validateLogin and compares the stored hash
    (column 5 of the result row) against the posted password.
    """
    # Pre-bind so the finally block can close safely on every path.
    con = None
    cursor = None
    try:
        _username = request.form['inputEmail']
        _password = request.form['inputPassword']

        # connect to mysql
        con = mysql.connect()
        cursor = con.cursor()
        cursor.callproc('sp_validateLogin',(_username,))
        data = cursor.fetchall()

        if len(data) > 0:
            if check_password_hash(str(data[0][5]),_password):
                # Column 0 is the user id; keep it in the session.
                session['user'] = data[0][0]
                return render_template('home.html')
            else:
                return render_template('error.html',error = 'Wrong Email address or Password.')
        else:
            return render_template('error.html',error = 'Wrong Email address or Password.')
    except Exception as e:
        return render_template('error.html',error = str(e))
    finally:
        # BUG FIX: the original closed cursor/con unconditionally and raised
        # NameError when the failure happened before they were assigned
        # (e.g. a missing form field or a failed connect). Also dropped a
        # stray debug print.
        if cursor is not None:
            cursor.close()
        if con is not None:
            con.close()
@app.route('/dashboard')
def demo():
    """List all locality names for the dashboard page.

    Also parses Places.txt into {name: [lat, lon]}.  The parsed dict is
    currently unused (it fed one-off DB-seeding code, now removed), but
    the read is kept so behaviour -- including the error when the file is
    missing -- is unchanged.  Removed: the dead commented-out seeding
    block and a debug print.
    """
    newDict = {}
    with open('Places.txt','r') as lines:
        for i in lines:
            k=i.split(',')
            v=k[2].strip("\n").strip("\r")
            cord=[k[1],v]
            newDict[k[0]] = cord
    conn = mysql.connect()
    cursor = conn.cursor()
    cursor.execute("SELECT Loc_name FROM Coordinates ORDER BY Loc_name DESC")
    data = cursor.fetchall()
    conn.commit()
    cursor.close()
    conn.close()
    if(session.get('user')):
        return render_template('dashboard.html', data = data,session=session)
    else:
        return render_template('dashboard.html',data = data)
######################################################################################33
#This is the review form implementation
@app.route('/addStats')
def displayForm():
    """Serve the locality-review (statistics) form."""
    return render_template('addStats.html')
@app.route('/addStats', methods=['POST'])
def takeData():
    """Store a posted locality review: the free-text review row plus the
    numeric ratings via the sp_addStats stored procedure.

    Requires a logged-in session ('user' holds the user id).
    """
    locale=str(request.form['inputLocale'])
    water=int(request.form['inputWater'])
    electricity=int(request.form['inputElectricity'])
    network=int(request.form['inputNetworkAvailability'])
    cleanliness=int(request.form['inputCleanliness'])
    green=int(request.form['inputGreenSpace'])
    life=int(request.form['inputNightlife'])
    rmen=int(request.form['inputRepairmenAvailability'])
    edu=int(request.form['inputeducation'])
    nhood=int(request.form['inputNeighbourhood'])
    lent=int(request.form['inputLocalEntertainment'])
    rev=str(request.form['inputReview'])
    uid=int(session.get('user'))
    conn=mysql.connect()
    cur=conn.cursor()
    # NOTE(review): '(locale)' is a plain string, not a 1-tuple; this relies
    # on the driver accepting a bare scalar parameter -- confirm.
    cur.execute("Select Loc_id from Coordinates where Loc_name=%s",(locale))
    lid=int(cur.fetchone()[0])
    cur.execute("Insert into Review (UserId,Loc_id,review_text) values(%s,%s,%s)",(uid,lid,rev))
    conn.commit()
    cur.callproc('sp_addStats',(uid,lid,water,electricity,network,cleanliness, green, lent, life, rmen, edu, nhood))
    conn.commit()
    cur.close()
    conn.close()
    return render_template('home.html')
######################################################################################
@app.route('/places/<place_name>/')
def places(place_name):
    """Render the detail page for one locality: a map, two rating charts
    (crime and lifestyle averages), and the user reviews.

    Requires a logged-in session; otherwise an error page is returned.
    """
    if session.get('user'):
        conn = mysql.connect()
        cursor = conn.cursor()
        # Row layout used below: data[1]=name, data[2]=lat, data[3]=lon.
        cursor.execute("SELECT * FROM Coordinates WHERE Loc_name = %s", (place_name))
        data = cursor.fetchall()[0]
        name=data[1]
        conn.commit()
        cursor.close()
        conn.close()
        mymap = Map(
            identifier="view-side",
            lat=data[2],
            lng=data[3],
            # NOTE(review): the marker is a fixed Palo Alto coordinate, not
            # the locality's own lat/lng -- confirm whether intentional.
            markers=[(37.4419, -122.1419)]
        )
        lat = data[2]
        lon = data[3]
        #The graph is made and passed on from here onwards
        ###################################################
        title="Crime Rates"
        crime_graph=pygal.Bar(width=600, height=600, explicit_size=True, title=title, style=BlueStyle, disable_xml_declaration=True, range=(0,10))
        crime_labels=['Theft','Violence', 'Harassment']
        conn = mysql.connect()
        cursor = conn.cursor()
        #procedure not used for simpler execution
        cursor.execute("select avg(Theft), avg(Violence), avg(Harassment) from Security, Coordinates where Coordinates.Loc_id=Security.Loc_id and Coordinates.Loc_name=%s",(place_name))
        data1=cursor.fetchone()
        crime_values=[data1[0],data1[1],data1[2]]
        crime_graph.x_labels=crime_labels
        crime_graph.add('Rating', crime_values)
        lifestyle_graph=pygal.Bar(width=1200, height=600, explicit_size=True, title="Living Standards", style=BlueStyle, disable_xml_declaration=True, range=(0,10))
        cursor.close()
        conn.close()
        conn = mysql.connect()
        cursor = conn.cursor()
        #procedure not used for simpler execution
        cursor.execute("SELECT avg(Water), avg(Electricity), avg(Network_Availability), avg(Cleanliness), avg(Green_space), avg(Local_Entertainment), avg(NightLife), avg(Repairmen_avail), avg(Education), avg(Neighbourhood) from LifeStyle, Coordinates where Coordinates.Loc_id=LifeStyle.Loc_id and Coordinates.Loc_name=%s",(place_name))
        data1=cursor.fetchone()
        lifestyle_values=[data1[0], data1[1], data1[2], data1[3], data1[4], data1[5], data1[6], data1[7], data1[8], data1[9]]
        lifestyle_labels=["Water", "Electricity", "Network Availability", "Cleanliness", "Green Space", "Local Entertainment", "Night Life", "Services", "Education", "Neighbourhood"]
        lifestyle_graph.x_labels=lifestyle_labels
        lifestyle_graph.add('Rating', lifestyle_values)
        graphs=[crime_graph, lifestyle_graph]
        cursor.close()
        conn.close()
        ########################################################
        conn = mysql.connect()
        cursor = conn.cursor()
        # Collect [username, review_text] pairs for the template.
        cursor.execute("SELECT * FROM Review where Loc_id = (SELECT Loc_id from Coordinates where Loc_name=%s) ",(place_name))
        dat = cursor.fetchall()
        use_fec=[]
        for review in dat:
            cursor.execute("SELECT UserName from User where UserId = %s", review[0])
            use_fec.append([cursor.fetchone()[0],review[2]])
        print use_fec
        return render_template('demo.html', use_fec=use_fec, rev_data=dat,name=name, mymap=mymap, data=data,lat = data[2], lon=data[3], graphs=graphs,dat=dat)
    else:
        return render_template('error.html',error = 'Unauthorized Access')
@app.route('/demo')
def userHome():
    """Show the demo map page; logged-in users only."""
    if not session.get('user'):
        return render_template('error.html', error='Unauthorized Access')
    demo_map = Map(
        identifier="view-side",
        lat=37.4419,
        lng=-122.1419,
        markers=[(37.4419, -122.1419)],
    )
    return render_template('demo.html', mymap=demo_map)
@app.route('/logout')
def logout():
    """Drop the user id from the session and show the landing page."""
    session.pop('user', None)
    return render_template('home.html')
if __name__ == "__main__":
    # Debug mode enables the interactive debugger/reloader; do not use in
    # production.
    app.debug = True
    app.run()
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Lic... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.enums.types import mobile_device_type
from google.ads.googleads.v8.resources.types import mobile_device_constant
from google.ads.googleads.v8.services.types import mobile_device_constant_service
from .transports.base import MobileDeviceConstantServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MobileDeviceConstantServiceGrpcTransport
class MobileDeviceConstantServiceClientMeta(type):
    """Metaclass for the MobileDeviceConstantService client.

    Provides class-level helpers for building and retrieving support
    objects (e.g. transport) without adding them to client instances.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[MobileDeviceConstantServiceTransport]]
    _transport_registry['grpc'] = MobileDeviceConstantServiceGrpcTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[MobileDeviceConstantServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        if not label:
            # No explicit request: fall back to the first registered
            # transport (registry order is preserved by OrderedDict).
            return next(iter(registry.values()))
        return registry[label]
class MobileDeviceConstantServiceClient(metaclass=MobileDeviceConstantServiceClientMeta):
"""Service to fetch mobile device constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileDeviceConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileDeviceConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MobileDeviceConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
MobileDeviceConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def mobile_device_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified mobile_device_constant string."""
return "mobileDeviceConstants/{criterion_id}".format(criterion_id=criterion_id, )
@staticmethod
def parse_mobile_device_constant_path(path: str) -> Dict[str,str]:
"""Parse a mobile_device_constant path into its component segments."""
m = re.match(r"^mobileDeviceConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, MobileDeviceConstantServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the mobile device constant service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.MobileDeviceConstantServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict in place of a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Caller supplied a certificate source: use it directly.
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the default client certificate, if present.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            # Explicit api_endpoint always wins over the env var.
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, MobileDeviceConstantServiceTransport):
            # transport is a MobileDeviceConstantServiceTransport instance.
            if credentials:
                raise ValueError('When providing a transport instance, '
                    'provide its credentials directly.')
            self._transport = transport
        elif isinstance(transport, str):
            Transport = type(self).get_transport_class(transport)
            # NOTE(review): on this branch the api_endpoint computed above is
            # not used — the transport is built against DEFAULT_ENDPOINT.
            # This matches typical generator output; confirm it is intended.
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = MobileDeviceConstantServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_mobile_device_constant(self,
            request: mobile_device_constant_service.GetMobileDeviceConstantRequest = None,
            *,
            resource_name: str = None,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> mobile_device_constant.MobileDeviceConstant:
        r"""Returns the requested mobile device constant in full detail.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Args:
            request (:class:`google.ads.googleads.v8.services.types.GetMobileDeviceConstantRequest`):
                The request object. Request message for
                [MobileDeviceConstantService.GetMobileDeviceConstant][google.ads.googleads.v8.services.MobileDeviceConstantService.GetMobileDeviceConstant].
            resource_name (:class:`str`):
                Required. Resource name of the mobile
                device to fetch.
                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v8.resources.types.MobileDeviceConstant:
                A mobile device constant.
        Raises:
            ValueError: If ``request`` is supplied together with the
                flattened ``resource_name`` argument.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError('If the `request` argument is set, then none of '
                'the individual field arguments should be set.')
        # Minor optimization to avoid making a copy if the user passes
        # in a mobile_device_constant_service.GetMobileDeviceConstantRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, mobile_device_constant_service.GetMobileDeviceConstantRequest):
            request = mobile_device_constant_service.GetMobileDeviceConstantRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_mobile_device_constant]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('resource_name', request.resource_name),
            )),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
# Public names exported by this module.
__all__ = (
    'MobileDeviceConstantServiceClient',
)
``` |
[
{
"content": "Recreate the original code text:\n```python\n#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\nimport os\nfrom django.conf.urls.defaults import *\n\nurlpatterns = []\n\n\n# load the rapidsms configuration\nfrom rapidsms.config import Config\nconf = Config(os.environ[\"RAPIDSMS_INI\"])\n\n\n# ... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\nimport os\nfrom django.conf.urls.defaults import *\n\nurlpatterns = []\n\n\n# load the rapidsms configuration\nfrom rapidsms.config import Config\nconf = Config(os.environ[\"RAPIDSMS... | ```python
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
"""Collect the Django urlpatterns of every active RapidSMS app.

Each app listed in the rapidsms ini may ship an optional ``urls.py``;
its patterns are appended to this module's ``urlpatterns``, and any
sibling ``static`` directory is served through django.views.static
(development-grade serving only — see the inline note below).
"""
import os
from django.conf.urls.defaults import *
urlpatterns = []
# load the rapidsms configuration
from rapidsms.config import Config
conf = Config(os.environ["RAPIDSMS_INI"])
# iterate each of the active rapidsms apps (from the ini),
# and (attempt to) import the urls.py from each. it's okay
# if this fails, since not all apps have a webui
for rs_app in conf["rapidsms"]["apps"]:
    try:
        # import the single "urlpatterns" attribute
        package_name = "apps.%s.urls" % (rs_app["type"])
        module = __import__(package_name, {}, {}, ["urlpatterns"])
        # add the explicitly defined urlpatterns
        urlpatterns += module.urlpatterns
        # does urls.py have a sibling "static" dir?
        mod_dir = os.path.dirname(module.__file__)
        static_dir = "%s/static" % mod_dir
        if os.path.exists(static_dir):
            # found a static dir, so automatically serve those files
            # via django. this is frowned upon in production, since
            # the server isn't tough (or fast), but there are so many
            # places that static files can come from, i'm not sure how
            # we would auto-configure that in apache. maybe we could
            # extend manager.py, to output an http conf mapping all
            # of this stuff for apache?
            urlpatterns += patterns("", url(
                "^static/%s/(?P<path>.*)$" % rs_app["type"],
                "django.views.static.serve",
                {"document_root": static_dir }
            ))
    # urls.py couldn't be imported for
    # this app. no matter, just carry
    # on importing the others
    except ImportError:
        pass
    # urls.py was imported, but it didn't
    # have "urlpatterns" attribute. this
    # should not happen... but does
    except AttributeError:
        pass
``` |
[
{
"content": "Here is some code:\n```python\n\"\"\"\n mbed CMSIS-DAP debugger\n Copyright (c) 2006-2013 ARM Limited\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://ww... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n\"\"\"\n mbed CMSIS-DAP debugger\n Copyright (c) 2006-2013 ARM Limited\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n... | ```python
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .family.target_kinetis import Kinetis
from .family.flash_kinetis import Flash_Kinetis
from ..core.memory_map import (FlashRegion, RamRegion, RomRegion, MemoryMap)
from ..coresight import ap
from ..coresight.cortex_m import CortexM
from ..debug.svd import SVDFile
import logging
import os.path
from time import (time, sleep)
# SIM device-ID register; its KEYATTR field tells single- from dual-core
# parts (read in KL28x.init below).
SIM_SDID = 0x40075024
SIM_SDID_KEYATTR_MASK = 0x70
SIM_SDID_KEYATTR_SHIFT = 4
KEYATTR_DUAL_CORE = 1
# Reset Control Module mode register; BOOTROM bits are set to disable ROM
# vector-table remapping (written in KL28x.init below).
RCM_MR = 0x4007f010
RCM_MR_BOOTROM_MASK = 0x6
# System Clock Generator registers, used by Flash_kl28z.init to switch the
# part onto FIRC for fast flash programming.
SCG_CSR = 0x4007B010
SCG_RCCR = 0x4007B014
SCS_MASK = 0x0F000000
SCS_SHIFT = 24
DIVCORE_MASK = 0x000F0000
DIVCORE_SHIFT = 16
DIVSLOW_MASK = 0x0000000F
DIVSLOW_SHIFT = 0
SCG_FIRCCSR = 0x4007B300
FIRCEN_MASK = 1
SCG_FIRCCFG = 0x4007B308
RECOVER_TIMEOUT = 1.0  # 1 second
# Auto-generated flash-programming algorithm blob (Thumb machine code plus
# entry-point/layout metadata) consumed by Flash_kl28z below. Do not edit
# the instruction words by hand; only the keys flagged at the bottom were
# added or modified after generation.
flash_algo = {
    'load_address' : 0x20000000,
    'instructions' : [
        0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
        0x4832b510, 0x60414930, 0x60814931, 0x22806801, 0x22204391, 0x60014311, 0x4448482e, 0xf860f000,
        0xd0002800, 0xbd102001, 0x47702000, 0xb5104829, 0x44484929, 0xf922f000, 0xd1042800, 0x21004825,
        0xf0004448, 0x4a25f9c3, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x4c1fb570, 0x444c4605, 0x4b1e4601,
        0x68e24620, 0xf89ef000, 0xd1052800, 0x46292300, 0x68e24620, 0xf916f000, 0x68ca4918, 0x029b2301,
        0x60ca431a, 0xb570bd70, 0x460b460c, 0x46014606, 0xb0844810, 0x44484615, 0xf8bef000, 0xd10a2800,
        0x90029001, 0x480b2101, 0x462b9100, 0x46314622, 0xf0004448, 0x4909f957, 0x230168ca, 0x431a029b,
        0xb00460ca, 0x0000bd70, 0xd928c520, 0x40076000, 0x0000ffff, 0x00000004, 0x6b65666b, 0xf0003000,
        0xd00a2800, 0x68c9492b, 0x0e094a2b, 0x447a0049, 0x03095a51, 0x2064d103, 0x20044770, 0xb4104770,
        0x60032300, 0x21026041, 0x02896081, 0x492360c1, 0x158a7a0c, 0x610240a2, 0x61837ac9, 0xbc106141,
        0x47704618, 0xd0022800, 0x20006181, 0x20044770, 0x28004770, 0x2004d101, 0xb4104770, 0x42191e5b,
        0x421ad101, 0xbc10d002, 0x47702065, 0x428b6803, 0x6840d804, 0x18181889, 0xd2024288, 0x2066bc10,
        0xbc104770, 0x47702000, 0x4288490d, 0x206bd001, 0x20004770, 0x28004770, 0x290fd008, 0x2a04d802,
        0xe005d104, 0xd8012913, 0xd0012a08, 0x47702004, 0x47702000, 0x40075040, 0x00000512, 0x40020020,
        0x6b65666b, 0x4605b5f8, 0x460c4616, 0xf7ff4618, 0x2800ffdb, 0x2308d12b, 0x46214632, 0xf7ff4628,
        0x0007ffb8, 0x19a6d123, 0x1e7668e9, 0x91004630, 0xf922f000, 0xd0032900, 0x1c409e00, 0x1e764346,
        0xd81342b4, 0x4478480a, 0x60046800, 0x20094909, 0xf00071c8, 0x4607f8f9, 0x280069a8, 0x4780d000,
        0xd1032f00, 0x190468e8, 0xd9eb42b4, 0xbdf84638, 0x00000416, 0x40020000, 0xd1012a00, 0x47702004,
        0x461cb5ff, 0x4615b081, 0x2304460e, 0x98014622, 0xff7ff7ff, 0xd11a0007, 0xd0172c00, 0x4478480d,
        0x600e6801, 0x6800cd02, 0x490b6041, 0x71c82006, 0xf8caf000, 0x98014607, 0x28006980, 0x4780d000,
        0xd1032f00, 0x1d361f24, 0xd1e72c00, 0xb0054638, 0x0000bdf0, 0x000003be, 0x40020000, 0x4604b510,
        0xf7ff4608, 0x2800ff71, 0x2c00d106, 0x4904d005, 0x71c82044, 0xf8a8f000, 0x2004bd10, 0x0000bd10,
        0x40020000, 0xb081b5ff, 0x460e4614, 0x23084605, 0xff3ff7ff, 0xd12a2800, 0x686868a9, 0xf8acf000,
        0x42719000, 0x40014240, 0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a, 0xd90042a5, 0x490d4625,
        0x447908e8, 0x600e6809, 0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804, 0xf876f000, 0xd1062800,
        0x1b649800, 0x183f1976, 0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x0000031a, 0x40020000, 0xd00c2800,
        0xd00a2a00, 0xd21a2908, 0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770,
        0x6840e00a, 0x6880e008, 0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770,
        0x00004770, 0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615, 0x9f082304, 0xfedbf7ff, 0xd1192800,
        0xd0172d00, 0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1, 0x681172c1, 0x60886820, 0xf824f000,
        0xd0082800, 0x29009907, 0x600ed000, 0xd0012f00, 0x60392100, 0x1d24bdf8, 0x1d361f2d, 0xd1e12d00,
        0x0000bdf8, 0x00000276, 0x40020000, 0xd1012800, 0x47702004, 0x4803b510, 0x71c22240, 0xf0007181,
        0xbd10f803, 0x40020000, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,
        0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x09032200, 0xd373428b,
        0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603, 0x2200d47f, 0x428b0843,
        0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03, 0x22ffd30d, 0xba120209,
        0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09, 0xd301428b, 0x1ac003cb,
        0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b, 0x0b034152, 0xd301428b,
        0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b, 0x1ac0028b, 0x0a434152,
        0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152, 0x428b09c3, 0x01cbd301,
        0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301, 0x41521ac0, 0x428b0903,
        0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883, 0x008bd301, 0x41521ac0,
        0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610, 0x0fcae05d, 0x4249d000,
        0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 0x0a03d32d, 0xd312428b, 0x018922fc, 0x0a03ba12,
        0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189, 0xe0001192, 0x09c30989,
        0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152, 0xd301428b, 0x1ac0014b,
        0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb, 0x08834152, 0xd301428b,
        0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x46634601, 0x105b4152,
        0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240, 0x46c02000, 0xbd0246c0,
        0x40020004, 0x00000000, 0x00000000, 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x00000000,
        0x00800000, 0x00000000,
    ],
    'pc_init' : 0x20000021,
    'pc_unInit': 0x20000049,
    'pc_program_page': 0x200000A7,
    'pc_erase_sector': 0x20000075,
    'pc_eraseAll' : 0x2000004D,
    'static_base' : 0x20000000 + 0x00000020 + 0x00000624,
    'begin_stack' : 0x20000000 + 0x00000800,
    'begin_data' : 0x20000000 + 0x00000A00,
    'page_size' : 0x00000200,
    # All keys above are auto-generated. The following are added or modified.
    'analyzer_supported' : True,                 # [modified] default is False
    'analyzer_address' : 0x1fffa000,             # [modified] default is zero. Use 8K block before flash algo. Can be any unused SRAM.
    'page_buffers' : [0x20000a00, 0x20001200],   # [added] Use areas above algo. Note 'begin_data' is unused if double buffering. Can be any unused SRAM.
    'min_program_length' : 4                     # [added] See FSL_FEATURE_FLASH_PFLASH_BLOCK_WRITE_UNIT_SIZE in KSDK features header file
};
class Flash_kl28z(Flash_Kinetis):
    """Kinetis flash driver bound to the KL28z algorithm blob above."""
    def __init__(self, target):
        super(Flash_kl28z, self).__init__(target, flash_algo)
    ##
    # This function sets up target clocks to ensure that flash is clocked at the maximum
    # of 24MHz. Doing so gets the best flash programming performance. The FIRC clock source
    # is used so that there is no dependency on an external crystal frequency.
    def init(self):
        super(Flash_kl28z, self).init()
        # Enable FIRC.
        value = self.target.read32(SCG_FIRCCSR)
        value |= FIRCEN_MASK
        self.target.write32(SCG_FIRCCSR, value)
        # Switch system to FIRC, core=48MHz (/1), slow=24MHz (/2).
        # Flash and the bus are clocked from the slow clock, and its max is 24MHz,
        # so there is no benefit from raising the core clock further.
        self.target.write32(SCG_RCCR, (0x3 << SCS_SHIFT) | (1 << DIVSLOW_SHIFT))
        # Log the resulting clock status register for debugging.
        csr = self.target.read32(SCG_CSR)
        logging.debug("SCG_CSR = 0x%08x", csr)
class KL28x(Kinetis):
    """Target definition for the KL28 family.

    Starts with the single-core memory map; init() probes SIM_SDID and,
    on dual-core silicon, switches to the dual map and brings up the
    second core's AHB-AP and CortexM object.
    """
    # Memory map for single-core parts.
    singleMap = MemoryMap(
        FlashRegion(name='flash', start=0, length=0x80000, blocksize=0x800, isBootMemory=True),
        RamRegion(name='ram', start=0x1fff8000, length=0x20000),
        RamRegion(name='usb ram', start=0x40100000, length=0x800)
        )
    # Memory map for dual-core parts (adds core1 imem/dmem regions).
    dualMap = MemoryMap(
        FlashRegion(name='flash', start=0, length=0x80000, blocksize=0x800, isBootMemory=True),
        RomRegion(name='core1 imem alias', start=0x1d200000, length=0x40000),
        RamRegion(name='core0 ram', start=0x1fffa000, length=0x18000),
        RomRegion(name='core1 imem', start=0x2d200000, length=0x40000),
        RamRegion(name='core1 dmem', start=0x2d300000, length=0x8000),
        RamRegion(name='usb ram', start=0x40100000, length=0x800)
        )
    def __init__(self, link):
        super(KL28x, self).__init__(link, self.singleMap)
        self.mdm_idr = 0x001c0020
        self.is_dual_core = False
        self._svd_location = SVDFile(vendor="Freescale", filename="MKL28T7_CORE0.svd", is_local=False)
    def init(self):
        super(KL28x, self).init()
        # Check if this is the dual core part.
        sdid = self.readMemory(SIM_SDID)
        keyattr = (sdid & SIM_SDID_KEYATTR_MASK) >> SIM_SDID_KEYATTR_SHIFT
        logging.debug("KEYATTR=0x%x SDID=0x%08x", keyattr, sdid)
        self.is_dual_core = (keyattr == KEYATTR_DUAL_CORE)
        if self.is_dual_core:
            self.memory_map = self.dualMap
            logging.info("KL28 is dual core")
            # Add second core's AHB-AP.
            self.core1_ap = ap.AHB_AP(self.dp, 2)
            self.aps[2] = self.core1_ap
            self.core1_ap.init(True)
            # Add second core. It is held in reset until released by software.
            self.core1 = CortexM(self.link, self.dp, self.core1_ap, self.memory_map, core_num=1)
            self.cores[1] = self.core1
            self.core1.init()
        # Disable ROM vector table remapping.
        self.write32(RCM_MR, RCM_MR_BOOTROM_MASK)
``` |
[
{
"content": "```python\n# Copyright (c) 2012 OpenStack, LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.or... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2012 OpenStack, LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http... | ```python
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from numbers import Number
import os
import re
import shutil
import six
import socket
import time
import unittest
import pexpect
from proboscis import test
from proboscis.asserts import assert_raises
from proboscis.decorators import expect_exception
from proboscis.decorators import time_out
from trove.tests.config import CONFIG
from trove.common.utils import poll_until
from trove.tests.util import process
from trove.common.utils import import_class
from tests import initialize
WHITE_BOX = CONFIG.white_box
VOLUMES_DRIVER = "trove.volumes.driver"
if WHITE_BOX:
    # TODO(tim.simpson): Restore this once white box functionality can be
    # added back to this test module.
    pass
    # NOTE(review): several names used below (volume, context, FLAGS,
    # trove_exception, test_driver) are only defined by these commented-out
    # imports — the white-box tests cannot run until they are restored.
    # from nova import context
    # from nova import exception
    # from nova import flags
    # from nova import utils
    # from trove import exception as trove_exception
    # from trove.utils import poll_until
    # from trove import volume
    # from trove.tests.volume import driver as test_driver
    # FLAGS = flags.FLAGS
# Lowercase-hex UUID in 8-4-4-4-12 layout (see is_uuid below).
UUID_PATTERN = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
                          '[0-9a-f]{4}-[0-9a-f]{12}$')
# A volume size far beyond available capacity, used to force failures.
HUGE_VOLUME = 5000
def is_uuid(text):
    """Return True when *text* contains a lowercase-hex UUID (8-4-4-4-12)."""
    pattern = ('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
               '[0-9a-f]{4}-[0-9a-f]{12}$')
    return re.search(pattern, text) is not None
class StoryDetails(object):
    """Mutable state shared by the ordered volume test classes below."""
    def __init__(self):
        # NOTE(review): `volume` and `context` are only provided by the
        # commented-out white-box imports above — this constructor fails
        # with NameError unless those are restored.
        self.api = volume.API()
        self.client = volume.Client()
        self.context = context.get_admin_context()
        self.device_path = None
        self.volume_desc = None
        self.volume_id = None
        self.volume_name = None
        self.volume = None
        self.host = socket.gethostname()
        self.original_uuid = None
        self.original_device_info = None
        self.resize_volume_size = 2
    def get_volume(self):
        """Fetch the current volume record from the API."""
        return self.api.get(self.context, self.volume_id)
    @property
    def mount_point(self):
        # Per-volume mount directory under the shared test mount root.
        return "%s/%s" % (LOCAL_MOUNT_PATH, self.volume_id)
    @property
    def test_mount_file_path(self):
        # Marker file written/checked to prove the volume is mounted.
        return "%s/test.txt" % self.mount_point
# Shared story state, populated by SetUp.test_05_create_story and then
# threaded through every subsequent test class.
story = None
storyFail = None
LOCAL_MOUNT_PATH = "/testsmnt"
class VolumeTest(unittest.TestCase):
    """This test tells the story of a volume, from cradle to grave."""
    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
    def setUp(self):
        # Re-bind the module-level story objects onto each test instance.
        global story, storyFail
        self.story = story
        self.storyFail = storyFail
    def assert_volume_as_expected(self, volume):
        """Assert *volume* matches the identity recorded in the story."""
        self.assertIsInstance(volume["id"], Number)
        self.assertEqual(self.story.volume_name, volume["display_name"])
        self.assertEqual(self.story.volume_desc, volume["display_description"])
        self.assertEqual(1, volume["size"])
        self.assertEqual(self.story.context.user_id, volume["user_id"])
        self.assertEqual(self.story.context.project_id, volume["project_id"])
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[initialize.start_volume])
class SetUp(VolumeTest):
    """Create shared story state and sanity-check the storage backend."""
    def test_05_create_story(self):
        """Creating 'story' vars used by the rest of these tests."""
        global story, storyFail
        story = StoryDetails()
        storyFail = StoryDetails()
    @time_out(60)
    def test_10_wait_for_topics(self):
        """Wait until the volume topic is up before proceeding."""
        topics = ["volume"]
        from tests.util.topics import hosts_up
        while not all(hosts_up(topic) for topic in topics):
            pass
    def test_20_refresh_local_folders(self):
        """Delete the local folders used as mount locations if they exist."""
        if os.path.exists(LOCAL_MOUNT_PATH):
            #TODO(rnirmal): Also need to remove any existing mounts.
            shutil.rmtree(LOCAL_MOUNT_PATH)
        os.mkdir(LOCAL_MOUNT_PATH)
        # Give some time for the services to startup
        time.sleep(10)
    @time_out(60)
    def test_30_mgmt_volume_check(self):
        """Get the volume information from the mgmt API"""
        story_context = self.story.context
        device_info = self.story.api.get_storage_device_info(story_context)
        print("device_info : %r" % device_info)
        self.assertNotEqual(device_info, None,
                            "the storage device information should exist")
        # Remembered so later tests can compare capacity before/after.
        self.story.original_device_info = device_info
    @time_out(60)
    def test_31_mgmt_volume_info(self):
        """Check the available space against the mgmt API info."""
        story_context = self.story.context
        device_info = self.story.api.get_storage_device_info(story_context)
        print("device_info : %r" % device_info)
        info = {'spaceTotal': device_info['raw_total'],
                'spaceAvail': device_info['raw_avail']}
        self._assert_available_space(info)
    def _assert_available_space(self, device_info, fail=False):
        """
        Give the SAN device_info(fake or not) and get the asserts for free
        """
        print("DEVICE_INFO on SAN : %r" % device_info)
        # Calculate the GBs; Divide by 2 for the FLAGS.san_network_raid_factor
        gbs = 1.0 / 1024 / 1024 / 1024 / 2
        total = int(device_info['spaceTotal']) * gbs
        free = int(device_info['spaceAvail']) * gbs
        used = total - free
        usable = total * (FLAGS.san_max_provision_percent * 0.01)
        real_free = float(int(usable - used))
        print("total : %r" % total)
        print("free : %r" % free)
        print("used : %r" % used)
        print("usable : %r" % usable)
        print("real_free : %r" % real_free)
        check_space = self.story.api.check_for_available_space
        self.assertFalse(check_space(self.story.context, HUGE_VOLUME))
        self.assertFalse(check_space(self.story.context, real_free + 1))
        if fail:
            self.assertFalse(check_space(self.story.context, real_free))
            self.assertFalse(check_space(self.story.context, real_free - 1))
            self.assertFalse(check_space(self.story.context, 1))
        else:
            self.assertTrue(check_space(self.story.context, real_free))
            self.assertTrue(check_space(self.story.context, real_free - 1))
            self.assertTrue(check_space(self.story.context, 1))
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
class AddVolumeFailure(VolumeTest):
    """Provision an impossibly large volume to exercise the failure path."""
    @time_out(60)
    def test_add(self):
        """
        Make call to FAIL a prov. volume and assert the return value is a
        FAILURE.
        """
        self.assertIsNone(self.storyFail.volume_id)
        name = "TestVolume"
        desc = "A volume that was created for testing."
        self.storyFail.volume_name = name
        self.storyFail.volume_desc = desc
        volume = self.storyFail.api.create(self.storyFail.context,
                                           size=HUGE_VOLUME,
                                           snapshot_id=None, name=name,
                                           description=desc)
        self.assertEqual(HUGE_VOLUME, volume["size"])
        # NOTE(review): assertTrue(a, b) treats b as the failure message,
        # so these two checks always pass; assertEqual was probably meant.
        self.assertTrue("creating", volume["status"])
        self.assertTrue("detached", volume["attach_status"])
        self.storyFail.volume = volume
        self.storyFail.volume_id = volume["id"]
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolumeFailure])
class AfterVolumeFailureIsAdded(VolumeTest):
    """Check that the volume can be retrieved via the API, and setup.
    All we want to see returned is a list-like with an initial string.
    """
    @time_out(120)
    def test_api_get(self):
        """Wait until the volume is a FAILURE."""
        volume = poll_until(lambda: self.storyFail.get_volume(),
                            lambda volume: volume["status"] != "creating")
        self.assertEqual(volume["status"], "error")
        # NOTE(review): assertTrue(a, b) treats b as the failure message —
        # this always passes; assertEqual was probably intended.
        self.assertTrue(volume["attach_status"], "detached")
    @time_out(60)
    def test_mgmt_volume_check(self):
        """Get the volume information from the mgmt API"""
        info = self.story.api.get_storage_device_info(self.story.context)
        print("device_info : %r" % info)
        self.assertNotEqual(info, None,
                            "the storage device information should exist")
        # Capacity must be unchanged because the failed create allocated nothing.
        self.assertEqual(self.story.original_device_info['raw_total'],
                         info['raw_total'])
        self.assertEqual(self.story.original_device_info['raw_avail'],
                         info['raw_avail'])
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
class AddVolume(VolumeTest):
    """Provision the 1 GB volume used by the rest of the story."""
    @time_out(60)
    def test_add(self):
        """Make call to prov. a volume and assert the return value is OK."""
        self.assertIsNone(self.story.volume_id)
        name = "TestVolume"
        desc = "A volume that was created for testing."
        self.story.volume_name = name
        self.story.volume_desc = desc
        volume = self.story.api.create(self.story.context, size=1,
                                       snapshot_id=None, name=name,
                                       description=desc)
        self.assert_volume_as_expected(volume)
        # NOTE(review): assertTrue(a, b) treats b as the failure message,
        # so these two checks always pass; assertEqual was probably meant.
        self.assertTrue("creating", volume["status"])
        self.assertTrue("detached", volume["attach_status"])
        self.story.volume = volume
        self.story.volume_id = volume["id"]
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolume])
class AfterVolumeIsAdded(VolumeTest):
    """Check that the volume can be retrieved via the API, and setup.
    All we want to see returned is a list-like with an initial string.
    """
    @time_out(120)
    def test_api_get(self):
        """Wait until the volume is finished provisioning."""
        volume = poll_until(lambda: self.story.get_volume(),
                            lambda volume: volume["status"] != "creating")
        self.assertEqual(volume["status"], "available")
        self.assert_volume_as_expected(volume)
        # NOTE(review): assertTrue(a, b) treats b as the failure message —
        # this always passes; assertEqual was probably intended.
        self.assertTrue(volume["attach_status"], "detached")
    @time_out(60)
    def test_mgmt_volume_check(self):
        """Get the volume information from the mgmt API"""
        print("self.story.original_device_info : %r" %
              self.story.original_device_info)
        info = self.story.api.get_storage_device_info(self.story.context)
        print("device_info : %r" % info)
        self.assertNotEqual(info, None,
                            "the storage device information should exist")
        self.assertEqual(self.story.original_device_info['raw_total'],
                         info['raw_total'])
        # Factor of 2 matches the raid-factor halving in _assert_available_space.
        volume_size = int(self.story.volume['size']) * (1024 ** 3) * 2
        print("volume_size: %r" % volume_size)
        print("self.story.volume['size']: %r" % self.story.volume['size'])
        avail = int(self.story.original_device_info['raw_avail']) - volume_size
        print("avail space: %r" % avail)
        self.assertEqual(int(info['raw_avail']), avail)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AfterVolumeIsAdded])
class SetupVolume(VolumeTest):
    """Attach the volume to this host and discover its device path."""
    @time_out(60)
    def test_assign_volume(self):
        """Tell the volume it belongs to this host node."""
        #TODO(tim.simpson) If this is important, could we add a test to
        #                  make sure some kind of exception is thrown if it
        #                  isn't added to certain drivers?
        self.assertNotEqual(None, self.story.volume_id)
        self.story.api.assign_to_compute(self.story.context,
                                         self.story.volume_id,
                                         self.story.host)
    @time_out(60)
    def test_setup_volume(self):
        """Set up the volume on this host. AKA discovery."""
        self.assertNotEqual(None, self.story.volume_id)
        device = self.story.client._setup_volume(self.story.context,
                                                 self.story.volume_id,
                                                 self.story.host)
        if not isinstance(device, six.string_types):
            self.fail("Expected device to be a string, but instead it was " +
                      str(type(device)) + ".")
        # Remembered so later tests can format/mount the device.
        self.story.device_path = device
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetupVolume])
class FormatVolume(VolumeTest):
    """Format the discovered device and verify the filesystem options."""
    @expect_exception(IOError)
    @time_out(60)
    def test_10_should_raise_IOError_if_format_fails(self):
        """
        Tests that if the driver's _format method fails, its
        public format method will perform an assertion properly, discover
        it failed, and raise an exception.
        """
        # NOTE(review): FLAGS and `volume` come from the commented-out
        # white-box imports above; this test cannot run without them.
        volume_driver_cls = import_class(FLAGS.volume_driver)
        class BadFormatter(volume_driver_cls):
            # Deliberately does nothing, so the format check must fail.
            def _format(self, device_path):
                pass
        bad_client = volume.Client(volume_driver=BadFormatter())
        bad_client._format(self.story.device_path)
    @time_out(60)
    def test_20_format(self):
        self.assertNotEqual(None, self.story.device_path)
        self.story.client._format(self.story.device_path)
    def test_30_check_options(self):
        # Verify the reserved-block percentage written by the formatter is 5%.
        cmd = ("sudo dumpe2fs -h %s 2> /dev/null | "
               "awk -F ':' '{ if($1 == \"Reserved block count\") "
               "{ rescnt=$2 } } { if($1 == \"Block count\") "
               "{ blkcnt=$2 } } END { print (rescnt/blkcnt)*100 }'")
        cmd = cmd % self.story.device_path
        out, err = process(cmd)
        self.assertEqual(float(5), round(float(out)), msg=out)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[FormatVolume])
class MountVolume(VolumeTest):
    """Mount the formatted volume and prove it is writable."""
    @time_out(60)
    def test_mount(self):
        self.story.client._mount(self.story.device_path,
                                 self.story.mount_point)
        # NOTE(review): the local name `file` shadows the builtin; harmless
        # here but worth renaming.
        with open(self.story.test_mount_file_path, 'w') as file:
            file.write("Yep, it's mounted alright.")
        self.assertTrue(os.path.exists(self.story.test_mount_file_path))
    def test_mount_options(self):
        # The mount must carry the noatime option; find it in `mount -l`.
        cmd = "mount -l | awk '/%s.*noatime/ { print $1 }'"
        cmd %= LOCAL_MOUNT_PATH.replace('/', '')
        out, err = process(cmd)
        self.assertEqual(os.path.realpath(self.story.device_path), out.strip(),
                         msg=out)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
class ResizeVolume(VolumeTest):
    """Resize the provisioned volume and verify the filesystem grew."""
    @time_out(300)
    def test_resize(self):
        """Ask the API to resize, then wait for the 'resized' status."""
        self.story.api.resize(self.story.context, self.story.volume_id,
                              self.story.resize_volume_size)
        volume = poll_until(lambda: self.story.get_volume(),
                            lambda volume: volume["status"] == "resized")
        self.assertEqual(volume["status"], "resized")
        # NOTE(review): assertTrue(a, b) treats b as the failure message,
        # so these two checks always pass; assertEqual was probably meant.
        self.assertTrue(volume["attach_status"], "attached")
        self.assertTrue(volume['size'], self.story.resize_volume_size)
    @time_out(300)
    def test_resizefs_rescan(self):
        """Resize the fs, then check the block device is roughly the new size."""
        self.story.client.resize_fs(self.story.context,
                                    self.story.volume_id)
        expected = "trove.tests.volume.driver.ISCSITestDriver"
        # Bug fix: compare the driver name by value. The original used
        # `is`, which tests object identity and is False for two distinct
        # but equal strings, silently skipping the multiplier branch.
        if FLAGS.volume_driver == expected:
            size = self.story.resize_volume_size * \
                test_driver.TESTS_VOLUME_SIZE_MULTIPLIER * 1024 * 1024
        else:
            size = self.story.resize_volume_size * 1024 * 1024
        out, err = process('sudo blockdev --getsize64 %s' %
                           os.path.realpath(self.story.device_path))
        # Allow 20% slack for filesystem overhead.
        if int(out) < (size * 0.8):
            self.fail("Size %s is not more or less %s" % (out, size))
        # Reset the volume status to available
        self.story.api.update(self.story.context, self.story.volume_id,
                              {'status': 'available'})
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
class UnmountVolume(VolumeTest):
@time_out(60)
def test_unmount(self):
self.story.client._unmount(self.story.mount_point)
child = pexpect.spawn("sudo mount %s" % self.story.mount_point)
child.expect("mount: can't find %s in" % self.story.mount_point)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[UnmountVolume])
class GrabUuid(VolumeTest):
@time_out(60)
def test_uuid_must_match_pattern(self):
"""UUID must be hex chars in the form 8-4-4-4-12."""
client = self.story.client # volume.Client()
device_path = self.story.device_path # '/dev/sda5'
uuid = client.get_uuid(device_path)
self.story.original_uuid = uuid
self.assertTrue(is_uuid(uuid), "uuid must match regex")
@time_out(60)
def test_get_invalid_uuid(self):
"""DevicePathInvalidForUuid is raised if device_path is wrong."""
client = self.story.client
device_path = "gdfjghsfjkhggrsyiyerreygghdsghsdfjhf"
self.assertRaises(trove_exception.DevicePathInvalidForUuid,
client.get_uuid, device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
class RemoveVolume(VolumeTest):
@time_out(60)
def test_remove(self):
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
self.assertRaises(Exception,
self.story.client._format, self.story.device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
class Initialize(VolumeTest):
@time_out(300)
def test_10_initialize_will_format(self):
"""initialize will setup, format, and store the UUID of a volume"""
self.assertTrue(self.story.get_volume()['uuid'] is None)
self.story.client.initialize(self.story.context, self.story.volume_id,
self.story.host)
volume = self.story.get_volume()
self.assertTrue(is_uuid(volume['uuid']), "uuid must match regex")
self.assertNotEqual(self.story.original_uuid, volume['uuid'],
"Validate our assumption that the volume UUID "
"will change when the volume is formatted.")
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
@time_out(60)
def test_20_initialize_the_second_time_will_not_format(self):
"""If initialize is called but a UUID exists, it should not format."""
old_uuid = self.story.get_volume()['uuid']
self.assertTrue(old_uuid is not None)
class VolumeClientNoFmt(volume.Client):
def _format(self, device_path):
raise RuntimeError("_format should not be called!")
no_fmt_client = VolumeClientNoFmt()
no_fmt_client.initialize(self.story.context, self.story.volume_id,
self.story.host)
self.assertEqual(old_uuid, self.story.get_volume()['uuid'],
"UUID should be the same as no formatting occurred.")
self.story.client.remove_volume(self.story.context,
self.story.volume_id,
self.story.host)
def test_30_check_device_exists(self):
assert_raises(exception.InvalidDevicePath, self.story.client._format,
self.story.device_path)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[Initialize])
class DeleteVolume(VolumeTest):
@time_out(60)
def test_delete(self):
self.story.api.delete(self.story.context, self.story.volume_id)
@test(groups=[VOLUMES_DRIVER], depends_on_classes=[DeleteVolume])
class ConfirmMissing(VolumeTest):
@time_out(60)
def test_discover_should_fail(self):
try:
self.story.client.driver.discover_volume(self.story.context,
self.story.volume)
self.fail("Expecting an error but did not get one.")
except exception.Error:
pass
except trove_exception.ISCSITargetNotDiscoverable:
pass
@time_out(60)
def test_get_missing_volume(self):
try:
volume = poll_until(lambda: self.story.api.get(self.story.context,
self.story.volume_id),
lambda volume: volume["status"] != "deleted")
self.assertEqual(volume["deleted"], False)
except exception.VolumeNotFound:
pass
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.ap... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | ```python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carry out voice commands by recognising keywords."""
import actionbase
import datetime
import entity
import logging
import random
import re
import subprocess
from twilio.rest import Client
import yaml
# =============================================================================
#
# Hey, Makers!
#
# This file contains some examples of voice commands that are handled locally,
# right on your Raspberry Pi.
#
# Do you want to add a new voice command? Check out the instructions at:
# https://aiyprojects.withgoogle.com/voice/#makers-guide-3-3--create-a-new-voice-command-or-action
# (MagPi readers - watch out! You should switch to the instructions in the link
# above, since there's a mistake in the MagPi instructions.)
#
# In order to make a new voice command, you need to do two things. First, make a
# new action where it says:
# "Implement your own actions here"
# Secondly, add your new voice command to the actor near the bottom of the file,
# where it says:
# "Add your own voice commands here"
#
# =============================================================================
# Actions might not use the user's command. pylint: disable=unused-argument
# Example: Say a simple response
# ================================
#
# This example will respond to the user by saying something. You choose what it
# says when you add the command below - look for SpeakAction at the bottom of
# the file.
#
# There are two functions:
# __init__ is called when the voice commands are configured, and stores
# information about how the action should work:
# - self.say is a function that says some text aloud.
# - self.words are the words to use as the response.
# run is called when the voice command is used. It gets the user's exact voice
# command as a parameter.
class SpeakAction(object):
"""Says the given text via TTS."""
def __init__(self, say, words):
self.say = say
self.words = words
def run(self, voice_command):
self.say(self.words)
# Example: Tell the current time
# ==============================
#
# This example will tell the time aloud. The to_str function will turn the time
# into helpful text (for example, "It is twenty past four."). The run function
# uses to_str say it aloud.
class SpeakTime(object):
"""Says the current local time with TTS."""
def __init__(self, say):
self.say = say
def run(self, voice_command):
time_str = self.to_str(datetime.datetime.now())
self.say(time_str)
def to_str(self, dt):
"""Convert a datetime to a human-readable string."""
HRS_TEXT = ['midnight', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
MINS_TEXT = ["five", "ten", "quarter", "twenty", "twenty-five", "half"]
hour = dt.hour
minute = dt.minute
# convert to units of five minutes to the nearest hour
minute_rounded = (minute + 2) // 5
minute_is_inverted = minute_rounded > 6
if minute_is_inverted:
minute_rounded = 12 - minute_rounded
hour = (hour + 1) % 24
# convert time from 24-hour to 12-hour
if hour > 12:
hour -= 12
if minute_rounded == 0:
if hour == 0:
return 'It is midnight.'
return "It is %s o'clock." % HRS_TEXT[hour]
if minute_is_inverted:
return 'It is %s to %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
return 'It is %s past %s.' % (MINS_TEXT[minute_rounded - 1], HRS_TEXT[hour])
# Example: Run a shell command and say its output
# ===============================================
#
# This example will use a shell command to work out what to say. You choose the
# shell command when you add the voice command below - look for the example
# below where it says the IP address of the Raspberry Pi.
class SpeakShellCommandOutput(object):
"""Speaks out the output of a shell command."""
def __init__(self, say, shell_command, failure_text):
self.say = say
self.shell_command = shell_command
self.failure_text = failure_text
def run(self, voice_command):
output = subprocess.check_output(self.shell_command, shell=True).strip()
if output:
self.say(output)
elif self.failure_text:
self.say(self.failure_text)
# Example: Change the volume
# ==========================
#
# This example will can change the speaker volume of the Raspberry Pi. It uses
# the shell command SET_VOLUME to change the volume, and then GET_VOLUME gets
# the new volume. The example says the new volume aloud after changing the
# volume.
class VolumeControl(object):
"""Changes the volume and says the new level."""
GET_VOLUME = r'amixer get Master | grep "Front Left:" | sed "s/.*\[\([0-9]\+\)%\].*/\1/"'
SET_VOLUME = 'amixer -q set Master %d%%'
def __init__(self, say, change):
self.say = say
self.change = change
def run(self, voice_command):
res = subprocess.check_output(VolumeControl.GET_VOLUME, shell=True).strip()
try:
logging.info("volume: %s", res)
vol = int(res) + self.change
vol = max(0, min(100, vol))
subprocess.call(VolumeControl.SET_VOLUME % vol, shell=True)
self.say(_('Volume at %d %%.') % vol)
except (ValueError, subprocess.CalledProcessError):
self.say(_('Sorry, I couldn\'t do that'))
logging.exception("Error using amixer to adjust volume.")
# Example: Repeat after me
# ========================
#
# This example will repeat what the user said. It shows how you can access what
# the user said, and change what you do or how you respond.
class RepeatAfterMe(object):
"""Repeats the user's command."""
def __init__(self, say, keyword):
self.say = say
self.keyword = keyword
def run(self, voice_command):
# The command still has the 'repeat after me' keyword, so we need to
# remove it before saying whatever is left.
to_repeat = voice_command.replace(self.keyword, '', 1)
self.say(to_repeat)
# =========================================
# Makers! Implement your own actions here.
# =========================================
def make_actor(say):
"""Create an actor to carry out the user's commands."""
entities = entity.Entities('/home/pi/entities2.yml')
actor = actionbase.Actor()
if False:
actor.add_keyword(
_('ip address'), SpeakShellCommandOutput(
say, "ip -4 route get 1 | head -1 | cut -d' ' -f8",
_('I do not have an ip address assigned to me.')))
actor.add_keyword(_('volume up'), VolumeControl(say, 10))
actor.add_keyword(_('volume down'), VolumeControl(say, -10))
actor.add_keyword(_('max volume'), VolumeControl(say, 100))
actor.add_keyword(_('repeat after me'),
RepeatAfterMe(say, _('repeat after me')))
# =========================================
# Makers! Add your own voice commands here.
# =========================================
for item in entities.units.values():
unit = entity.Unit(item)
logging.debug(unit)
actor.add_keyword(unit.synonyms, PageUnit(say, unit))
for item in entities.tenants.values():
tenant = entity.Tenant(item)
logging.debug(tenant)
actor.add_keyword(tenant.synonyms, PageTenant(say, tenant))
actor.add_keyword(GainEntry.synonyms, GainEntry(say, entities.tenants))
actor.add_keyword(RequestPassword.synonyms,
RequestPassword(say, entities.tenants))
return actor
class Messenger:
with open('/home/pi/twilio.yml') as f:
config = yaml.load(f)
account_sid = config['account_sid']
auth_token = config['auth_token']
from_ = config['from']
def __init__(self):
self.client = Client(self.account_sid, self.auth_token)
def send_text(self, to, msg):
message = self.client.messages.create(to=to,
from_=self.from_,
body=msg)
logging.debug(message.sid)
class Intent:
''' may contain a context. context will be,
for example, RequestPassword intent '''
def __init__(self, context=None):
self.context = context
class RequestPassword(Intent):
synonyms = [
"password",
'forgot', # TODO: these are too general
'help',
]
responses = [
"ok, $tenant, i've sent you a new password",
"ok, $tenant, check your messages",
"you got it, $tenant, i just texted you a new one.",
]
token = '$tenant'
def __init__(self, say, tenants, context=None):
super().__init__(context)
self.say = say
# TODO: Need to query which tenant this is. use context
self.tenant = entity.Tenant(tenants['Bryan'])
def run(self, voice_command):
Password(self.tenant).new_word()
response = random.choice(self.responses)
response = response.replace(self.token,
self.tenant.name)
self.say(response)
WORDS_FILE="/home/pi/CROSSWD.TXT"
class Password:
''' this is an action class '''
messenger = Messenger()
with open(WORDS_FILE, "r") as f:
words = f.readlines()
def __init__(self, tenant):
self.tenant = tenant
def new_word(self):
''' 1. generate and persist new tenant-specific password
2. send new password to tenant '''
self.tenant.password = random.choice(self.words).rstrip()
# logger.debug(self.tenant.word)
# TODO: persist it
self.messenger.send_text(self.tenant.phone_no,
self.tenant.password)
return self.tenant.password
class GainEntry(Intent):
query_responses = [
'hello, $tenant, please state the password',
'good day, $tenant, what is your password?',
'howdy, $tenant! your password, please',
]
pass_responses = [
'hello, $tenant, have a wonderful day',
'good day, $tenant, nice to see you',
'howdy, $tenant! another lovely day in the city',
]
fail_responses = [
'sorry, i could not let you in',
'authorization has been denied. please try again',
"please check your password, it didn't match",
"try spelling it out slowly next time",
]
synonyms = [
'let me in',
'open up',
'open the door',
'knock knock',
]
token = '$tenant'
def __init__(self, say, tenants):
self.say = say
# TODO: Get which tenant it is based on the password used
self.tenant = entity.Tenant(tenants['Bryan'])
def run(self, voice_command):
passed = False
# TODO:
# 1. listen for the password
# 2. match the result to self.tenant.password
# 3. on a match, give a greeting and buzz the door
# TODO TEST CODE
if self.tenant.password in voice_command.lower():
response = 'that is the correct password: %s. '%(
self.tenant.password)
response += random.choice(self.pass_responses)
passed = True
else:
response = "i didn't recognize the password. "
response += random.choice(self.fail_responses)
response = response.replace(self.token, self.tenant.name)
self.say(response)
if passed:
# do this at the end to reduce delay
Password(self.tenant).new_word()
# TODO TEST CODE
return 1
class PageUnit(Intent):
entity = '$unit'
responses = [
'please wait. paging unit $unit',
'ok. paging unit $unit',
'hang on while i page unit $unit',
'you got it. paging unit $unit',
]
def __init__(self, say, unit):
self.unit = unit
self.say = say
if unit.paging_exception:
self.responses = unit.paging_exception.message
def say_unit(self, unit):
''' Pronounce unit number more like English
E.g.: pronounce '453' as 'four fifty three' instead of
'four hundred fifty three'
'''
try:
val = int(re.findall(r'\d+', unit)[0])
suf = ''
match = re.match(r"([0-9]+)([a-z]+)", unit, re.I)
if match:
n, suf = match.groups()
hun = int(val / 100)
rem = int(val - (hun * 100))
ret = '%s %s %s'%(hun, rem, suf)
except Exception as e:
return unit
return ret
def run(self, voice_command):
response = random.choice(self.responses)
response = response.replace(self.entity,
self.say_unit(self.unit.address))
self.say(response)
if self.unit.paging_exception:
return self.unit.paging_exception.run()
# TODO: page the unit!
return 1
class PageTenant(Intent):
''' in api.ai, the entity is @tenant. The entity value is $tenant
not sure what the distinction is for '''
entity = '$tenant'
responses = [
'ok, paging $tenant',
'ok, please stand by while i page $tenant',
'sure, please wait while i page $tenant',
'paging $tenant, please stand by',
'ok, hang on while i page $tenant',
]
def __init__(self, say, tenant):
self.say = say
self.tenant = tenant
if tenant.paging_exception:
self.responses = tenant.paging_exception.message
def run(self, voice_command):
response = random.choice(self.responses)
self.say(response.replace(self.entity, self.tenant.name))
if self.tenant.paging_exception:
return self.tenant.paging_exception.run()
# TODO: page the tenant!
return 1
def add_commands_just_for_cloud_speech_api(actor, say):
"""Add simple commands that are only used with the Cloud Speech API."""
def simple_command(keyword, response):
actor.add_keyword(keyword, SpeakAction(say, response))
simple_command('alexa', _("We've been friends since we were both starter projects"))
simple_command(
'beatbox',
'pv zk pv pv zk pv zk kz zk pv pv pv zk pv zk zk pzk pzk pvzkpkzvpvzk kkkkkk bsch')
simple_command(_('clap'), _('clap clap'))
simple_command('google home', _('She taught me everything I know.'))
simple_command(_('hello'), _('hello to you too'))
simple_command(_('tell me a joke'),
_('What do you call an alligator in a vest? An investigator.'))
simple_command(_('three laws of robotics'),
_("""The laws of robotics are
0: A robot may not injure a human being or, through inaction, allow a human
being to come to harm.
1: A robot must obey orders given it by human beings except where such orders
would conflict with the First Law.
2: A robot must protect its own existence as long as such protection does not
conflict with the First or Second Law."""))
simple_command(_('where are you from'), _("A galaxy far, far, just kidding. I'm from Seattle."))
simple_command(_('your name'), _('A machine has no name'))
actor.add_keyword(_('time'), SpeakTime(say))
``` |
[
{
"content": "Here is some code:\n```python\nimport os\nimport argparse\nfrom PIL import Image\n\ndef is_image(file_name):\n if \".png\" in file_name or \".jpg\" in file_name:\n return True\n else:\n return False\n\ndef resize_image(input_path, output_path, wd=0, ht=0):\n img = Image.open... | [
{
"content": "Here is some code:\n<|memory_start|>```python\nimport os\nimport argparse\nfrom PIL import Image\n\ndef is_image(file_name):\n if \".png\" in file_name or \".jpg\" in file_name:\n return True\n else:\n return False\n\ndef resize_image(input_path, output_path, wd=0, ht=0):\n ... | ```python
import os
import argparse
from PIL import Image
def is_image(file_name):
if ".png" in file_name or ".jpg" in file_name:
return True
else:
return False
def resize_image(input_path, output_path, wd=0, ht=0):
img = Image.open(input_path)
# Resize the image
sz = img.size
if wd == 0:
wd = sz[0] / 2
if ht == 0:
ht = sz[1] / 2
ratio_dst = float(wd) / float(ht)
ratio_src = float(sz[0]) / float(sz[1])
if ratio_src > ratio_dst:
wd_new = int(ht * ratio_src)
img = img.resize((wd_new, int(ht)), Image.ANTIALIAS)
else:
ht_new = int(wd / ratio_src)
img = img.resize((int(wd), ht_new), Image.ANTIALIAS)
# Crop the image
sz = img.size
px = 0
py = 0
if sz[0] > wd:
px = (sz[0] - wd) / 2
if sz[1] > ht:
py = (sz[1] - ht) / 2
if px > 0 or py > 0:
img = img.crop((px, py, px + wd, py + ht))
# Save the output image
img.save(output_path)
def resize_images(input_path, output_path, wd=0, ht=0):
if not os.path.exists(output_path):
os.makedirs(output_path)
count = 0
for path, subdirs, files in os.walk(input_path):
for file_name in files:
if not is_image(file_name):
continue
input_file_path = os.path.join(path, file_name)
output_file_path = os.path.join(output_path, file_name)
resize_image(input_file_path, output_file_path, wd, ht)
count = count + 1
return count
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description='Resize images in a folder.')
parser.add_argument('input_path', type=str, help='Path to the input folder')
parser.add_argument('output_path', type=str, help='Path to the output folder')
parser.add_argument('--wd', type=int, default=0, help='Target width')
parser.add_argument('--ht', type=int, default=0, help='Target height')
args = parser.parse_args()
input_path = args.input_path
output_path = args.output_path
wd = args.wd
ht = args.ht
resize_images(input_path, output_path, wd, ht)
``` |
[
{
"content": "```python\n#!/usr/bin/env python3\n# Copyright (c) 2014-2018 The Energi Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed u... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python3\n# Copyright (c) 2014-2018 The Energi Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n# Copyright (c) 2014-2016 The Bitcoin Core developers\... | ```python
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
r = 0
for i in range(8):
t = unpack(b"<I", f.read(4))[0]
r += t << (i * 32)
return r
#allows simple http get calls
def http_get_call(host, port, path, response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('GET', path)
if response_object:
return conn.getresponse()
return conn.getresponse().read().decode('utf-8')
#allows simple http post calls with a request body
def http_post_call(host, port, path, requestdata = '', response_object = 0):
conn = http.client.HTTPConnection(host, port)
conn.request('POST', path, requestdata)
if response_object:
return conn.getresponse()
return conn.getresponse().read()
class RESTTest (BitcoinTestFramework):
FORMAT_SEPARATOR = "."
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
print("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
######################################
# GETUTXOS: query a unspent outpoint #
######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
################################################
# GETUTXOS: now query a already spent outpoint #
################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is no utox in the response because this oupoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 0) #there should be a outpoint because it has just added to the mempool
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1) #there should be a outpoint because it has just added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send a invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 400) #must be a 400 because we send a invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 400) #must be a 400 because we exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj, True)
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    # Script entry point: run the REST interface functional test.
    RESTTest().main()
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n\"\"\"\n:mod: DataManager\n\n.. module: DataManager\n\n:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.\n\nThis module consists of DataManager and related classes.\n\n\"\"\"\n\n\n# # RSCID\n__RCSID__ = \"$Id$\"\n# ... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n\"\"\"\n:mod: DataManager\n\n.. module: DataManager\n\n:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.\n\nThis module consists of DataManager and related classes.\n\n\"\"\"\n\n\n# # RSCID\n__RCSID_... | ```python
"""
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # RSCID
__RCSID__ = "$Id$"
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
class DataManager( object ):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
def __init__( self, catalogs = [], masterCatalogOnly = False, vo = False ):
""" c'tor
:param self: self reference
:param catalogs: the list of catalog in which to perform the operations. This
list will be ignored if masterCatalogOnly is set to True
:param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
The catalogs parameter will be ignored.
:param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
self.vo = vo
catalogsToUse = FileCatalog( vo = self.vo ).getMasterCatalogNames()['Value'] if masterCatalogOnly else catalogs
self.fc = FileCatalog( catalogs = catalogsToUse, vo = self.vo )
self.accountingClient = None
self.registrationProtocol = ['SRM2', 'DIP']
self.thirdPartyProtocols = ['SRM2', 'DIP']
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations( self.vo ).getValue( 'DataManagement/IgnoreMissingInFC', False )
self.useCatalogPFN = Operations( self.vo ).getValue( 'DataManagement/UseCatalogPFN', True )
def setAccountingClient( self, client ):
""" Set Accounting Client instance
"""
self.accountingClient = client
  def __verifyOperationWritePermission( self, path ):
    """ Check if we have write permission to the given directory

        :param path: a single path string or a list of paths
        :return: S_OK( bool ) - True only if *every* path is writable;
                 the catalog error is returned unchanged if the query fails
    """
    # Accept both a single path and a list of paths.
    if type( path ) in StringTypes:
      paths = [ path ]
    else:
      paths = path
    res = self.fc.getPathPermissions( paths )
    if not res['OK']:
      return res
    # A single non-writable (or missing) path is enough to refuse the operation.
    for path in paths:
      if not res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
        return S_OK( False )
    return S_OK( True )
##########################################################################
#
# These are the bulk removal methods
#
  def cleanLogicalDirectory( self, lfnDir ):
    """ Clean the logical directory from the catalog and storage

        :param lfnDir: a directory LFN or a list of directory LFNs
        :return: S_OK( { 'Successful' : {...}, 'Failed' : {...} } ) keyed by directory
    """
    # Accept both a single directory and a list of directories.
    if type( lfnDir ) in StringTypes:
      lfnDir = [ lfnDir ]
    retDict = { "Successful" : {}, "Failed" : {} }
    # Each directory is cleaned independently so one failure does not stop the rest.
    for folder in lfnDir:
      res = self.__cleanDirectory( folder )
      if not res['OK']:
        self.log.debug( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
        retDict["Failed"][folder] = res['Message']
      else:
        self.log.debug( "Successfully removed directory.", folder )
        retDict["Successful"][folder] = res['Value']
    return S_OK( retDict )
  def __cleanDirectory( self, folder ):
    """ delete all files from directory :folder: in FileCatalog and StorageElement

    :param self: self reference
    :param str folder: directory name
    :return: S_OK() on success, S_ERROR otherwise
    """
    # Refuse early if the current credential has no write permission.
    res = self.__verifyOperationWritePermission( folder )
    if not res['OK']:
      return res
    if not res['Value']:
      errStr = "__cleanDirectory: Write access not permitted for this credential."
      self.log.debug( errStr, folder )
      return S_ERROR( errStr )
    # Recursively list all files registered under the directory.
    res = self.__getCatalogDirectoryContents( [ folder ] )
    if not res['OK']:
      return res
    # Remove the files plus the 'dirac_directory' marker file
    # (presumably a legacy per-directory placeholder - verify against catalog conventions).
    res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
    if not res['OK']:
      return res
    # Individual removal failures are logged but do not abort the cleanup.
    for lfn, reason in res['Value']['Failed'].items():
      gLogger.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
    # Sweep the physical directory on every SE configured in the cleaning list.
    storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
    failed = False
    for storageElement in sorted( storageElements ):
      res = self.__removeStorageDirectory( folder, storageElement )
      if not res['OK']:
        failed = True
    if failed:
      return S_ERROR( "Failed to clean storage directory at all SEs" )
    # Finally drop the (now empty) directory from the file catalog itself.
    res = returnSingleResult( self.fc.removeDirectory( folder, recursive = True ) )
    if not res['OK']:
      return res
    return S_OK()
  def __removeStorageDirectory( self, directory, storageElement ):
    """ delete SE directory

    :param self: self reference
    :param str directory: folder to be removed
    :param str storageElement: DIRAC SE name
    :return: S_OK() when the directory is absent or successfully removed
    """
    se = StorageElement( storageElement )
    # Check existence first so removal is idempotent: a missing directory is success.
    res = returnSingleResult( se.exists( directory ) )
    if not res['OK']:
      self.log.debug( "Failed to obtain existance of directory", res['Message'] )
      return res
    exists = res['Value']
    if not exists:
      self.log.debug( "The directory %s does not exist at %s " % ( directory, storageElement ) )
      return S_OK()
    res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
    if not res['OK']:
      self.log.debug( "Failed to remove storage directory", res['Message'] )
      return res
    self.log.debug( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
                                                                      directory,
                                                                      storageElement ) )
    return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
self.log.debug( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = returnSingleResult( self.fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Problem getting the %s directory content" % currentDir, res['Message'] )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.debug( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
self.log.debug( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
for folder in directories:
self.log.debug( folder )
activeDirs = directories
allFiles = []
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = returnSingleResult( self.fc.listDirectory( currentDir, verbose = ( days != 0 ) ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
self.log.debug( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
for subdir in subdirs:
if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append( subdir )
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get( 'Metadata', fileInfo )
if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
fileName = fileInfo.get( 'LFN', fileName )
allFiles.append( fileName )
return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
  def getFile( self, lfn, destinationDir = '' ):
    """ Get a local copy of a LFN from Storage Elements.

        'lfn' is the logical file name for the desired file

        :param destinationDir: local directory the copy is written to (default: current dir)
        :return: S_OK( { 'Successful' : { lfn : localPath }, 'Failed' : { lfn : reason } } )
    """
    # Normalise the input: accept a single LFN or a list of LFNs.
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "getFile: Supplied lfn must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    self.log.debug( "getFile: Attempting to get %s files." % len( lfns ) )
    # Resolve replicas (presumably restricted to currently usable SEs - see getActiveReplicas).
    res = self.getActiveReplicas( lfns )
    if not res['OK']:
      return res
    failed = res['Value']['Failed']
    lfnReplicas = res['Value']['Successful']
    # Catalog metadata (size, checksum) is needed to validate each download.
    res = self.fc.getFileMetadata( lfnReplicas.keys() )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    fileMetadata = res['Value']['Successful']
    successful = {}
    for lfn in fileMetadata:
      res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
      if not res['OK']:
        failed[lfn] = res['Message']
      else:
        successful[lfn] = res['Value']
    # Flush the accounting records produced by the individual transfers.
    gDataStoreClient.commit()
    return S_OK( { 'Successful': successful, 'Failed' : failed } )
  def __getFile( self, lfn, replicas, metadata, destinationDir ):
    """ Download one LFN, trying each replica in proximity order until one succeeds.

        :param lfn: logical file name to download
        :param replicas: dict { storageElementName : physicalFile }
        :param metadata: catalog metadata for the LFN ('Size' and 'Checksum' are used)
        :param destinationDir: local directory for the copy
        :return: S_OK( localFile ) on first validated download, S_ERROR otherwise
    """
    if not replicas:
      self.log.debug( "No accessible replicas found" )
      return S_ERROR( "No accessible replicas found" )
    # Determine the best replicas
    res = self._getSEProximity( replicas.keys() )
    if not res['OK']:
      return res
    # Try SEs in the returned (proximity-sorted) order; stop at the first good copy.
    for storageElementName in res['Value']:
      se = StorageElement( storageElementName )
      physicalFile = replicas[storageElementName]
      # One accounting record per attempt; it is committed by the caller.
      oDataOperation = self.__initialiseAccountingObject( 'getFile', storageElementName, 1 )
      oDataOperation.setStartTime()
      startTime = time.time()
      res = returnSingleResult( se.getFile( physicalFile, localPath = os.path.realpath( destinationDir ) ) )
      getTime = time.time() - startTime
      oDataOperation.setValueByKey( 'TransferTime', getTime )
      if not res['OK']:
        self.log.debug( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
        oDataOperation.setValueByKey( 'TransferOK', 0 )
        oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
        oDataOperation.setEndTime()
      else:
        oDataOperation.setValueByKey( 'TransferSize', res['Value'] )
        localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
        localAdler = fileAdler( localFile )
        # Validate the download against catalog size and (if present) adler32 checksum;
        # a mismatch marks the record 'FinishedDirty' and the next replica is tried.
        if ( metadata['Size'] != res['Value'] ):
          oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
          self.log.debug( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
                                                                                         metadata['Size'] ) )
        elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
          oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
          self.log.debug( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
                                                                                             metadata['Checksum'] ) )
        else:
          # Success: register the accounting record and return the local path.
          oDataOperation.setEndTime()
          gDataStoreClient.addRegister( oDataOperation )
          return S_OK( localFile )
      # Failed or dirty attempt: register the record and move to the next replica.
      gDataStoreClient.addRegister( oDataOperation )
    self.log.debug( "getFile: Failed to get local copy from any replicas.", lfn )
    return S_ERROR( "DataManager.getFile: Failed to get local copy from any replicas." )
  def _getSEProximity( self, ses ):
    """ get SE proximity

        Order the given SEs for download preference: SEs at the local site first,
        then SEs in the same country, then all remaining ones - each group
        randomized to spread the load.

        :param ses: iterable of SE names to order
        :return: S_OK( list ) of SE names, most-preferred first
    """
    siteName = DIRAC.siteName()
    localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
    countrySEs = []
    # The country code is taken from the last token of the site name
    # (assumes the usual GRID.Site.cc naming - TODO confirm).
    countryCode = str( siteName ).split( '.' )[-1]
    res = getSEsForCountry( countryCode )
    if res['OK']:
      countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
    sortedSEs = randomize( localSEs ) + randomize( countrySEs )
    sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
    return S_OK( sortedSEs )
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'file' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# ancestors = ancestors if ancestors else list()
res = self.__verifyOperationWritePermission( os.path.dirname( lfn ) )
if not res['OK']:
return res
if not res['Value']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.debug( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.debug( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fc.exists( {lfn:guid} )
if not res['OK']:
errStr = "putAndRegister: Completey failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.debug( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.debug( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
destinationSE = storageElement.getStorageElementName()['Value']
res = returnSingleResult( storageElement.getPfnForLfn( lfn ) )
if not res['OK']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destPfn = res['Value']
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destPfn, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destPfn, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
  def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
    """ Replicate a LFN to a destination SE and register the replica.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is the local file system location to be used as a temporary cache
        'catalog' restricts the replica registration to the given catalog (all catalogs if empty)

        :return: S_OK( { 'Successful' : { lfn : timings }, 'Failed' : { lfn : info } } )
    """
    successful = {}
    failed = {}
    self.log.debug( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
    startReplication = time.time()
    res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
    replicationTime = time.time() - startReplication
    if not res['OK']:
      errStr = "DataManager.replicateAndRegister: Completely failed to replicate file."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    if not res['Value']:
      # The file was already present at the destination SE
      self.log.debug( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
      successful[lfn] = { 'replicate' : 0, 'register' : 0 }
      resDict = { 'Successful' : successful, 'Failed' : failed }
      return S_OK( resDict )
    successful[lfn] = { 'replicate' : replicationTime }
    destPfn = res['Value']['DestPfn']
    destSE = res['Value']['DestSE']
    self.log.debug( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
    replicaTuple = ( lfn, destPfn, destSE )
    startRegistration = time.time()
    res = self.registerReplica( replicaTuple, catalog = catalog )
    registrationTime = time.time() - startRegistration
    if not res['OK']:
      # Need to return to the client that the file was replicated but not registered
      errStr = "replicateAndRegister: Completely failed to register replica."
      self.log.debug( errStr, res['Message'] )
      failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
    else:
      if lfn in res['Value']['Successful']:
        self.log.debug( "replicateAndRegister: Successfully registered replica." )
        successful[lfn]['register'] = registrationTime
      else:
        errStr = "replicateAndRegister: Failed to register replica."
        self.log.debug( errStr, res['Value']['Failed'][lfn] )
        failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
    return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.debug( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.debug( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
  def __replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
    """ Replicate a LFN to a destination SE.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention

        :return: S_OK() with empty Value if the file is already at destSE,
                 S_OK( { 'DestSE' : ..., 'DestPfn' : ... } ) after a successful copy,
                 S_ERROR if every source fails
    """
    ###########################################################
    # Check that we have write permissions to this directory.
    res = self.__verifyOperationWritePermission( lfn )
    if not res['OK']:
      return res
    if not res['Value']:
      errStr = "__replicate: Write access not permitted for this credential."
      self.log.debug( errStr, lfn )
      return S_ERROR( errStr )
    self.log.debug( "__replicate: Performing replication initialization." )
    res = self.__initializeReplication( lfn, sourceSE, destSE )
    if not res['OK']:
      self.log.debug( "__replicate: Replication initialisation failed.", lfn )
      return res
    destStorageElement = res['Value']['DestStorage']
    lfnReplicas = res['Value']['Replicas']
    destSE = res['Value']['DestSE']
    catalogueSize = res['Value']['CatalogueSize']
    ###########################################################
    # If the LFN already exists at the destination we have nothing to do
    if destSE in lfnReplicas:
      self.log.debug( "__replicate: LFN is already registered at %s." % destSE )
      return S_OK()
    ###########################################################
    # Resolve the best source storage elements for replication
    self.log.debug( "__replicate: Determining the best source replicas." )
    res = self.__resolveBestReplicas( lfn, sourceSE, lfnReplicas, catalogueSize )
    if not res['OK']:
      self.log.debug( "__replicate: Best replica resolution failed.", lfn )
      return res
    replicaPreference = res['Value']
    ###########################################################
    # Now perform the replication for the file
    if destPath:
      destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
    else:
      destPath = lfn
    res = returnSingleResult( destStorageElement.getPfnForLfn( destPath ) )
    if not res['OK']:
      errStr = "__replicate: Failed to generate destination PFN."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    destPfn = res['Value']
    # Find out if there is a replica already at the same site
    # Same-site sources are tried first to keep the transfer local.
    localReplicas = []
    otherReplicas = []
    for sourceSE, sourcePfn in replicaPreference:
      if sourcePfn == destPfn:
        continue
      res = isSameSiteSE( sourceSE, destSE )
      if res['OK'] and res['Value']:
        localReplicas.append( ( sourceSE, sourcePfn ) )
      else:
        otherReplicas.append( ( sourceSE, sourcePfn ) )
    replicaPreference = localReplicas + otherReplicas
    # Try each candidate source until one copy succeeds.
    for sourceSE, sourcePfn in replicaPreference:
      self.log.debug( "__replicate: Attempting replication from %s to %s." % ( sourceSE, destSE ) )
      fileDict = {destPfn:sourcePfn}
      if sourcePfn == destPfn:
        continue
      localFile = ''
      #FIXME: this should not be hardcoded!!!
      if sourcePfn.find( 'srm' ) == -1 or destPfn.find( 'srm' ) == -1:
        # No third party transfer is possible, we have to replicate through the local cache
        localDir = '.'
        if localCache:
          localDir = localCache
        self.getFile( lfn, destinationDir = localDir )
        localFile = os.path.join( localDir, os.path.basename( lfn ) )
        fileDict = {destPfn:localFile}
      res = destStorageElement.replicateFile( fileDict, sourceSize = catalogueSize, singleFile = True )
      # Remove the temporary local copy regardless of transfer outcome.
      if localFile and os.path.exists( localFile ):
        os.remove( localFile )
      if res['OK']:
        self.log.debug( "__replicate: Replication successful." )
        resDict = {'DestSE':destSE, 'DestPfn':destPfn}
        return S_OK( resDict )
      else:
        errStr = "__replicate: Replication failed."
        self.log.debug( errStr, "%s from %s to %s." % ( lfn, sourceSE, destSE ) )
    ##########################################################
    # If the replication failed for all sources give up
    errStr = "__replicate: Failed to replicate with all sources."
    self.log.debug( errStr, lfn )
    return S_ERROR( errStr )
  def __initializeReplication( self, lfn, sourceSE, destSE ):
    """ Perform all pre-flight checks for a replication and gather its inputs.

        Verifies source/destination SE availability, fetches the LFN replicas
        and the catalogued size (a zero size aborts the transfer).

        :return: S_OK( { 'DestStorage', 'DestSE', 'Replicas', 'CatalogueSize' } )
    """
    # Horrible, but kept to not break current log messages
    logStr = "__initializeReplication:"
    ###########################################################
    # Check the sourceSE if specified
    self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
    if sourceSE:
      if not self.__SEActive( sourceSE ).get( 'Value', {} ).get( 'Read' ):
        infoStr = "%s Supplied source Storage Element is not currently allowed for Read." % ( logStr )
        self.log.info( infoStr, sourceSE )
        return S_ERROR( infoStr )
    ###########################################################
    # Check that the destination storage element is sane and resolve its name
    self.log.debug( "%s Verifying dest StorageElement validity (%s)." % ( logStr, destSE ) )
    destStorageElement = StorageElement( destSE )
    res = destStorageElement.isValid()
    if not res['OK']:
      errStr = "%s The storage element is not currently valid." % logStr
      self.log.debug( errStr, "%s %s" % ( destSE, res['Message'] ) )
      return S_ERROR( errStr )
    # destSE is replaced by the canonical SE name from here on.
    destSE = destStorageElement.getStorageElementName()['Value']
    self.log.verbose( "%s Destination Storage Element verified." % logStr )
    ###########################################################
    # Check whether the destination storage element is banned
    self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
    if not self.__SEActive( destSE ).get( 'Value', {} ).get( 'Write' ):
      infoStr = "%s Supplied destination Storage Element is not currently allowed for Write." % ( logStr )
      self.log.debug( infoStr, destSE )
      return S_ERROR( infoStr )
    ###########################################################
    # Get the LFN replicas from the file catalogue
    self.log.debug( "%s Attempting to obtain replicas for %s." % ( logStr, lfn ) )
    res = self.getReplicas( lfn )
    if not res[ 'OK' ]:
      errStr = "%s Completely failed to get replicas for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "%s Failed to get replicas for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
      return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
    self.log.debug( "%s Successfully obtained replicas for LFN." % logStr )
    lfnReplicas = res['Value']['Successful'][lfn]
    ###########################################################
    # Check the file is at the sourceSE
    self.log.debug( "%s: Determining whether source Storage Element is sane." % logStr )
    if sourceSE and sourceSE not in lfnReplicas:
      errStr = "%s LFN does not exist at supplied source SE." % logStr
      self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
      return S_ERROR( errStr )
    ###########################################################
    # If the file catalogue size is zero fail the transfer
    self.log.debug( "%s Attempting to obtain size for %s." % ( logStr, lfn ) )
    res = self.fc.getFileSize( lfn )
    if not res['OK']:
      errStr = "%s Completely failed to get size for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "%s Failed to get size for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
      return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
    catalogueSize = res['Value']['Successful'][lfn]
    if catalogueSize == 0:
      errStr = "%s Registered file size is 0." % logStr
      self.log.debug( errStr, lfn )
      return S_ERROR( errStr )
    self.log.debug( "%s File size determined to be %s." % ( logStr, catalogueSize ) )
    self.log.verbose( "%s Replication initialization successful." % logStr )
    resDict = {
      'DestStorage' : destStorageElement,
      'DestSE' : destSE,
      'Replicas' : lfnReplicas,
      'CatalogueSize' : catalogueSize
      }
    return S_OK( resDict )
  def __resolveBestReplicas( self, lfn, sourceSE, lfnReplicas, catalogueSize ):
    """ Select the replicas of :lfn: usable as third-party-transfer sources.

        A replica is kept only if its SE is readable and valid, exposes remote
        protocols, a source PFN can be built for the third-party protocols, and
        the physical file size matches the catalogue size.

        :param str lfn: LFN being replicated
        :param str sourceSE: if set, only this SE is considered as a source
        :param dict lfnReplicas: mapping SE name -> PFN as known to the catalogue
        :param catalogueSize: file size registered in the catalogue
        :return: S_OK( [ ( diracSE, sourcePfn ), ... ] ) or S_ERROR if no usable source
    """
    ###########################################################
    # Determine the best replicas (remove banned sources, invalid storage elements and file with the wrong size)
    # It's not really the best, but the one authorized
    logStr = "__resolveBestReplicas:"
    replicaPreference = []
    for diracSE, pfn in lfnReplicas.items():
      # Honour an explicit source SE request: skip every other SE
      if sourceSE and diracSE != sourceSE:
        self.log.debug( "%s %s replica not requested." % ( logStr, diracSE ) )
        continue
      # Skip SEs that are not currently allowed for reading
      if not self.__SEActive( diracSE ).get( 'Value', {} ).get( 'Read' ):
        self.log.debug( "%s %s is currently not allowed as a source." % ( logStr, diracSE ) )
      else:
        self.log.debug( "%s %s is available for use." % ( logStr, diracSE ) )
        storageElement = StorageElement( diracSE )
        res = storageElement.isValid()
        if not res['OK']:
          errStr = "%s The storage element is not currently valid." % logStr
          self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
        else:
          # pfn = returnSingleResult( storageElement.getPfnForLfn( lfn ) ).get( 'Value', pfn )
          remoteProtocols = storageElement.getRemoteProtocols()
          if not remoteProtocols['OK']:
            self.log.debug( "%s : could not get remote protocols %s" % ( diracSE, remoteProtocols['Message'] ) )
            continue
          remoteProtocols = remoteProtocols['Value']
          if remoteProtocols:
            # Build the PFN usable with the third-party-copy protocols
            self.log.debug( "%s Attempting to get source pfns for remote protocols." % logStr )
            res = returnSingleResult( storageElement.getPfnForProtocol( pfn, protocol = self.thirdPartyProtocols ) )
            if res['OK']:
              sourcePfn = res['Value']
              # print pfn, sourcePfn
              self.log.debug( "%s Attempting to get source file size." % logStr )
              res = storageElement.getFileSize( sourcePfn )
              if res['OK']:
                if sourcePfn in res['Value']['Successful']:
                  sourceFileSize = res['Value']['Successful'][sourcePfn]
                  self.log.debug( "%s Source file size determined to be %s." % ( logStr, sourceFileSize ) )
                  # Only replicas whose physical size matches the catalogue are usable
                  if catalogueSize == sourceFileSize:
                    fileTuple = ( diracSE, sourcePfn )
                    replicaPreference.append( fileTuple )
                  else:
                    errStr = "%s Catalogue size and physical file size mismatch." % logStr
                    self.log.debug( errStr, "%s %s" % ( diracSE, sourcePfn ) )
                else:
                  errStr = "%s Failed to get physical file size." % logStr
                  self.log.always( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Value']['Failed'][sourcePfn] ) )
              else:
                errStr = "%s Completely failed to get physical file size." % logStr
                self.log.debug( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Message'] ) )
            else:
              errStr = "%s Failed to get PFN for replication for StorageElement." % logStr
              self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
          else:
            errStr = "%s Source Storage Element has no remote protocols." % logStr
            self.log.debug( errStr, diracSE )
    if not replicaPreference:
      errStr = "%s Failed to find any valid source Storage Elements." % logStr
      self.log.debug( errStr )
      return S_ERROR( errStr )
    else:
      return S_OK( replicaPreference )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
errStr = "registerFile: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
def __registerFile( self, fileTuples, catalog ):
""" register file to cataloge """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN':physicalFile, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
if catalog:
fileCatalog = FileCatalog( catalog, vo = self.vo )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
fileCatalog = self.fc
res = fileCatalog.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
errStr = "registerReplica: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
  def __registerReplica( self, replicaTuples, catalog ):
    """ Register a list of ( lfn, pfn, se ) replicas in a catalogue.

        PFNs are first converted per-SE to the registration protocol form;
        entries whose SE is invalid or whose PFN cannot be converted are
        reported in 'Failed' without attempting catalogue registration.

        :param list replicaTuples: list of ( lfn, pfn, storageElementName )
        :param str catalog: catalog name (default catalogue when empty)
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    # Group ( lfn, pfn ) pairs per storage element
    seDict = {}
    for lfn, pfn, storageElementName in replicaTuples:
      seDict.setdefault( storageElementName, [] ).append( ( lfn, pfn ) )
    failed = {}
    # replicaTuples is rebound here: it now collects the resolved tuples
    replicaTuples = []
    for storageElementName, replicaTuple in seDict.items():
      destStorageElement = StorageElement( storageElementName )
      res = destStorageElement.isValid()
      if not res['OK']:
        errStr = "__registerReplica: The storage element is not currently valid."
        self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
        # Every pair destined for this SE fails as a whole
        for lfn, pfn in replicaTuple:
          failed[lfn] = errStr
      else:
        # Use the canonical SE name for registration
        storageElementName = destStorageElement.getStorageElementName()['Value']
        for lfn, pfn in replicaTuple:
          # Convert the PFN to the registration protocol form
          res = returnSingleResult( destStorageElement.getPfnForProtocol( pfn, protocol = self.registrationProtocol, withPort = False ) )
          if not res['OK']:
            failed[lfn] = res['Message']
          else:
            replicaTuple = ( lfn, res['Value'], storageElementName, False )
            replicaTuples.append( replicaTuple )
    self.log.debug( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
    # HACK!
    replicaDict = {}
    for lfn, pfn, se, _master in replicaTuples:
      replicaDict[lfn] = {'SE':se, 'PFN':pfn}
    if catalog:
      fileCatalog = FileCatalog( catalog, vo = self.vo )
      res = fileCatalog.addReplica( replicaDict )
    else:
      res = self.fc.addReplica( replicaDict )
    if not res['OK']:
      errStr = "__registerReplica: Completely failed to register replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    failed.update( res['Value']['Failed'] )
    successful = res['Value']['Successful']
    resDict = {'Successful':successful, 'Failed':failed}
    return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
  def removeFile( self, lfn, force = None ):
    """ Remove the file (all replicas) from Storage Elements and file catalogue

        'lfn' is the file to be removed

        :param mixed lfn: LFN string or list of LFN strings
        :param bool force: when True, files missing from the catalogue are
                           counted as successfully removed (defaults to
                           self.ignoreMissingInFC)
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    if force == None:
      force = self.ignoreMissingInFC
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "removeFile: Supplied lfns must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    # First check if the file exists in the FC
    res = self.fc.exists( lfns )
    if not res['OK']:
      return res
    success = res['Value']['Successful']
    # Only files that do exist go through the removal machinery
    lfns = [lfn for lfn in success if success[lfn] ]
    if force:
      # Files that don't exist are removed successfully
      successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
      failed = {}
    else:
      successful = {}
      failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
    # Check that we have write permissions to this directory.
    if lfns:
      res = self.__verifyOperationWritePermission( lfns )
      if not res['OK']:
        return res
      if not res['Value']:
        errStr = "removeFile: Write access not permitted for this credential."
        self.log.error( errStr, lfns )
        return S_ERROR( errStr )
      self.log.debug( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
      res = self.fc.getReplicas( lfns, True )
      if not res['OK']:
        errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
        self.log.debug( errStr, res['Message'] )
        return res
      lfnDict = res['Value']['Successful']
      for lfn, reason in res['Value'].get( 'Failed', {} ).items():
        # Ignore files missing in FC if force is set
        if reason == 'No such file or directory' and force:
          successful[lfn] = True
        elif reason == 'File has zero replicas':
          # Still remove the catalogue entry: pass an empty replica dict
          lfnDict[lfn] = {}
        else:
          failed[lfn] = reason
      res = self.__removeFile( lfnDict )
      if not res['OK']:
        errStr = "removeFile: Completely failed to remove files."
        self.log.debug( errStr, res['Message'] )
        return res
      failed.update( res['Value']['Failed'] )
      successful.update( res['Value']['Successful'] )
    resDict = {'Successful':successful, 'Failed':failed}
    # Flush pending accounting records
    gDataStoreClient.commit()
    return S_OK( resDict )
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( ( lfn, pfn ) )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
fileTuple = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, fileTuple )
if not res['OK']:
errStr = res['Message']
for lfn, pfn in fileTuple:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fc.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def removeReplica( self, storageElementName, lfn ):
    """ Remove replica at the supplied Storage Element from Storage Element then file catalogue

       'storageElementName' is the storage where the file is to be removed
       'lfn' is the file to be removed

        A replica that is the only one of its file is refused ("Failed to
        remove sole replica"); use removeFile for that.

        :param str storageElementName: DIRAC SE name
        :param mixed lfn: LFN string or list of LFN strings
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "removeReplica: Supplied lfns must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    # Check that we have write permissions to this directory.
    res = self.__verifyOperationWritePermission( lfns )
    if not res['OK']:
      return res
    if not res['Value']:
      errStr = "removeReplica: Write access not permitted for this credential."
      self.log.debug( errStr, lfns )
      return S_ERROR( errStr )
    self.log.debug( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
                                                                                        storageElementName ) )
    # True: consider replicas in any status
    res = self.fc.getReplicas( lfns, True )
    if not res['OK']:
      errStr = "removeReplica: Completely failed to get replicas for lfns."
      self.log.debug( errStr, res['Message'] )
      return res
    failed = res['Value']['Failed']
    successful = {}
    replicaTuples = []
    for lfn, repDict in res['Value']['Successful'].items():
      if storageElementName not in repDict:
        # The file doesn't exist at the storage element so don't have to remove it
        successful[lfn] = True
      elif len( repDict ) == 1:
        # The file has only a single replica so don't remove
        self.log.debug( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
                                                                                               storageElementName ) )
        failed[lfn] = "Failed to remove sole replica"
      else:
        replicaTuples.append( ( lfn, repDict[storageElementName] ) )
    res = self.__removeReplica( storageElementName, replicaTuples )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    successful.update( res['Value']['Successful'] )
    # Flush pending accounting records
    gDataStoreClient.commit()
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def __removeReplica( self, storageElementName, fileTuple ):
    """ Remove the replicas described by :fileTuple: from :storageElementName:,
        physically first, then from the catalogue.

        :param str storageElementName: DIRAC SE name
        :param list fileTuple: list of ( lfn, pfn ) pairs, pfn as known to the catalogue
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    lfnDict = {}
    failed = {}
    se = None if self.useCatalogPFN else StorageElement( storageElementName ) # Placeholder for the StorageElement object
    if se:
      res = se.isValid( 'removeFile' )
      if not res['OK']:
        return res
    for lfn, pfn in fileTuple:
      # Per-file permission check: only authorized files are removed
      res = self.__verifyOperationWritePermission( lfn )
      if not res['OK'] or not res['Value']:
        errStr = "__removeReplica: Write access not permitted for this credential."
        self.log.debug( errStr, lfn )
        failed[lfn] = errStr
      else:
        # This is the PFN as in the FC
        lfnDict[lfn] = pfn
    # Now we should use the constructed PFNs if needed, for the physical removal
    # Reverse lfnDict into pfnDict with required PFN
    if self.useCatalogPFN:
      pfnDict = dict( zip( lfnDict.values(), lfnDict.keys() ) )
    else:
      # Ask the SE to construct the PFN; fall back to the catalogue PFN
      pfnDict = dict( [ ( se.getPfnForLfn( lfn )['Value'].get( 'Successful', {} ).get( lfn, lfnDict[lfn] ), lfn ) for lfn in lfnDict] )
    # removePhysicalReplicas is called with real PFN list
    res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
    if not res['OK']:
      errStr = "__removeReplica: Failed to remove physical replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( res['Message'] )
    failed.update( dict( [( pfnDict[pfn], error ) for pfn, error in res['Value']['Failed'].items()] ) )
    # Here we use the FC PFN...
    replicaTuples = [( pfnDict[pfn], lfnDict[pfnDict[pfn]], storageElementName ) for pfn in res['Value']['Successful']]
    res = self.__removeCatalogReplica( replicaTuples )
    if not res['OK']:
      errStr = "__removeReplica: Completely failed to remove physical files."
      self.log.debug( errStr, res['Message'] )
      # Physical removal succeeded but catalogue deregistration failed for all
      failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
      successful = {}
    else:
      failed.update( res['Value']['Failed'] )
      successful = res['Value']['Successful']
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to be removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.debug( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
def __removeCatalogReplica( self, replicaTuple ):
""" remove replica form catalogue """
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fc.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
for lfn in res['Value']['Successful']:
infoStr = "__removeCatalogReplica: Successfully removed replica."
self.log.debug( infoStr, lfn )
if res['Value']['Successful']:
self.log.debug( "__removeCatalogReplica: Removed %d replicas" % len( res['Value']['Successful'] ) )
success = res['Value']['Successful']
if success:
self.log.info( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
for lfn, error in res['Value']['Failed'].items():
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplica( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' are the files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationWritePermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfns )
return S_ERROR( errStr )
self.log.debug( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.debug( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
pfnDict = {}
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
sePfn = repDict[storageElementName]
pfnDict[sePfn] = lfn
self.log.debug( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( pfnDict ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
for pfn, error in res['Value']['Failed'].items():
failed[pfnDict[pfn]] = error
for pfn in res['Value']['Successful']:
successful[pfnDict[pfn]] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, pfnsToRemove ):
""" remove replica from storage element """
self.log.debug( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( pfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( pfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
ret = storageElement.getFileSize( pfnsToRemove )
deletedSizes = ret.get( 'Value', {} ).get( 'Successful', {} )
res = storageElement.removeFile( pfnsToRemove )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
else:
for surl, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][surl] = surl
res['Value']['Failed'].pop( surl )
for surl in res['Value']['Successful']:
ret = returnSingleResult( storageElement.getPfnForProtocol( surl, protocol = self.registrationProtocol, withPort = False ) )
if not ret['OK']:
res['Value']['Successful'][surl] = surl
else:
res['Value']['Successful'][surl] = ret['Value']
deletedSize = sum( [size for pfn, size in deletedSizes.items() if pfn in res['Value']['Successful']] )
oDataOperation.setValueByKey( 'TransferSize', deletedSize )
oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.debug( infoStr )
return res
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
res = returnSingleResult( storageElement.getPfnForLfn( lfn ) )
if not res['OK']:
errStr = "put: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destPfn = res['Value']
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult( storageElement.putFile( fileDict ) )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.debug( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = destPfn
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
if not key in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
# Fix the caching
readStatus = seReadStatus[se] if se in seReadStatus else seReadStatus.setdefault( se, self.__SEActive( se ).get( 'Value', {} ).get( 'Read', False ) )
if not readStatus:
replicas.pop( se )
return S_OK( replicaDict )
def __SEActive( self, se ):
""" check is SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][resolvedName].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][resolvedName].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
def __initialiseAccountingObject( self, operation, se, files ):
""" create accouting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'DataManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
# Defunct methods only there before checking backward compatability
#
  def getReplicas( self, lfns, allStatus = True ):
    """ Get the replicas of :lfns: from the catalogue.

        When self.useCatalogPFN is False, the catalogue PFNs are replaced in
        place by PFNs freshly constructed by each StorageElement.

        :param lfns: LFN(s) to look up
        :param bool allStatus: include replicas in any status
        :return: S_OK( { 'Successful' : { lfn : { se : pfn } }, 'Failed' : {...} } )
    """
    res = self.fc.getReplicas( lfns, allStatus = allStatus )
    if not self.useCatalogPFN:
      if res['OK']:
        se_lfn = {}
        catalogReplicas = res['Value']['Successful']
        # We group the query to getPfnForLfn by storage element to gain in speed
        for lfn in catalogReplicas:
          for se in catalogReplicas[lfn]:
            se_lfn.setdefault( se, [] ).append( lfn )
        for se in se_lfn:
          seObj = StorageElement( se )
          succPfn = seObj.getPfnForLfn( se_lfn[se] ).get( 'Value', {} ).get( 'Successful', {} )
          for lfn in succPfn:
            # catalogReplicas still points res["value"]["Successful"] so res will be updated
            catalogReplicas[lfn][se] = succPfn[lfn]
    return res
##################################################################################################3
# Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
# first if the replica is known to the catalog
def __executeIfReplicaExists( self, storageElementName, lfn, method, **argsDict ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get replicas for lfn
res = FileCatalog( vo = self.vo ).getReplicas( lfn )
if not res["OK"]:
errStr = "_callReplicaSEFcn: Completely failed to get replicas for LFNs."
self.log.debug( errStr, res["Message"] )
return res
# # returned dict, get failed replicase
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
pfnDict = {}
se = None # Placeholder for the StorageElement object
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
if self.useCatalogPFN:
pfn = replicas[storageElementName]
else:
se = se if se else StorageElement( storageElementName )
res = se.getPfnForLfn( lfn )
pfn = res.get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[storageElementName] )
pfnDict[pfn] = lfn
else:
errStr = "_callReplicaSEFcn: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
# # call StorageElement function at least
se = se = se if se else StorageElement( storageElementName )
fcn = getattr( se, method )
res = fcn( pfnDict.keys(), **argsDict )
# # check result
if not res["OK"]:
errStr = "_callReplicaSEFcn: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for pfn, pfnRes in res["Value"]["Successful"].items():
retDict["Successful"][pfnDict[pfn]] = pfnRes
for pfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][pfnDict[pfn]] = errorMessage
return S_OK( retDict )
  def getReplicaIsFile( self, lfn, storageElementName ):
    """ determine whether the supplied lfns are files at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "isFile" )
  def getReplicaSize( self, lfn, storageElementName ):
    """ get the size of files for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileSize" )
  def getReplicaAccessUrl( self, lfn, storageElementName ):
    """ get the access url for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getAccessUrl" )
def getReplicaMetadata( self, lfn, storageElementName ):
    """ get the file metadata for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    # thin wrapper: delegates to the private dispatcher with the SE "getFileMetadata" method
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileMetadata" )
def prestageReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ issue a prestage requests for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: prestage pin lifetime in seconds (default 86400 = 24h)
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "prestageFile", lifetime = lifetime )
def pinReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ pin the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: pin lifetime in seconds (default 86400 = 24h)
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "pinFile", lifetime = lifetime )
def releaseReplica( self, lfn, storageElementName ):
    """ release pins for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    # thin wrapper: delegates to the private dispatcher with the SE "releaseFile" method
    return self.__executeIfReplicaExists( storageElementName, lfn, "releaseFile" )
def getReplica( self, lfn, storageElementName, localPath = False ):
    """ copy replicas from DIRAC SE to local directory

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param mixed localPath: path in the local file system, if False, os.getcwd() will be used
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "getFile", localPath = localPath )
``` |
[
{
"content": "Here is the code content:\n```python\nfrom django import forms\nfrom django.contrib.comments.forms import CommentForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_comments_xtd.conf import settings\nfrom django_comments_xtd.models import TmpXtdComment\n\n\nclass XtdCommentF... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nfrom django import forms\nfrom django.contrib.comments.forms import CommentForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_comments_xtd.conf import settings\nfrom django_comments_xtd.models import TmpXtdComment\n\n\nc... | ```python
from django import forms
from django.contrib.comments.forms import CommentForm
from django.utils.translation import ugettext_lazy as _
from django_comments_xtd.conf import settings
from django_comments_xtd.models import TmpXtdComment
class XtdCommentForm(CommentForm):
    """Comment form extending django.contrib.comments with threading support
    and a follow-up-notification opt-in.
    """
    # Extra fields on top of the stock CommentForm.
    followup = forms.BooleanField(
        required=False, label=_("Notify me of follow up comments via email"))
    # pk of the comment being replied to; 0 means a top-level comment.
    reply_to = forms.IntegerField(required=True, initial=0, widget=forms.HiddenInput())
    def __init__(self, *args, **kwargs):
        # When rendering a reply form, the comment being answered is passed
        # in and pre-populates the hidden reply_to field.
        comment = kwargs.pop("comment", None)
        if comment:
            initial = kwargs.pop("initial", {})
            initial.update({"reply_to": comment.pk})
            kwargs["initial"] = initial
        # NOTE(review): super(CommentForm, ...) starts the MRO lookup *after*
        # CommentForm, skipping any override CommentForm itself might define --
        # confirm this is intentional rather than a typo for XtdCommentForm.
        super(CommentForm, self).__init__(*args, **kwargs)
        # Re-declare the stock fields to attach placeholders/help text.
        self.fields['name'] = forms.CharField(
            widget=forms.TextInput(attrs={'placeholder':_('name')}))
        self.fields['email'] = forms.EmailField(
            label=_("Email"), help_text=_("Required for comment verification"),
            widget=forms.TextInput(attrs={'placeholder':_('email')})
        )
        self.fields['url'] = forms.URLField(
            required=False,
            widget=forms.TextInput(attrs={'placeholder':_('website')}))
        self.fields['comment'] = forms.CharField(
            widget=forms.Textarea(attrs={'placeholder':_('comment')}),
            max_length=settings.COMMENT_MAX_LENGTH)
    def get_comment_model(self):
        # Comments are staged in a temporary model until confirmed.
        return TmpXtdComment
    def get_comment_create_data(self):
        # NOTE(review): same skip-one-level super call as in __init__ -- verify.
        data = super(CommentForm, self).get_comment_create_data()
        # Threading bookkeeping defaults; parent_id links to the replied comment.
        data.update({'thread_id': 0, 'level': 0, 'order': 1,
                     'parent_id': self.cleaned_data['reply_to'],
                     'followup': self.cleaned_data['followup']})
        if settings.COMMENTS_XTD_CONFIRM_EMAIL:
            # comment must be verified before getting approved
            data['is_public'] = False
        return data
``` |
[
{
"content": "```python\n\"\"\"\nShutterfly Data Sink\n\"\"\"\nimport logging\nlog = logging.getLogger(\"modules.Shutterfly\")\n\nimport conduit\nimport conduit.utils as Utils\nfrom conduit.datatypes import Rid\nimport conduit.dataproviders.Image as Image\nimport conduit.Exceptions as Exceptions\nimport conduit... | [
{
"content": "<|memory_start|>```python\n\"\"\"\nShutterfly Data Sink\n\"\"\"\nimport logging\nlog = logging.getLogger(\"modules.Shutterfly\")\n\nimport conduit\nimport conduit.utils as Utils\nfrom conduit.datatypes import Rid\nimport conduit.dataproviders.Image as Image\nimport conduit.Exceptions as Exceptions... | ```python
"""
Shutterfly Data Sink
"""
import logging
log = logging.getLogger("modules.Shutterfly")
import conduit
import conduit.utils as Utils
from conduit.datatypes import Rid
import conduit.dataproviders.Image as Image
import conduit.Exceptions as Exceptions
import conduit.datatypes.Photo as Photo
Utils.dataprovider_add_dir_to_path(__file__, "shutterfly")
from shutterfly import Shutterfly
from gettext import gettext as _
MODULES = {
"ShutterflySink" : {"type" : "dataprovider"},
}
class ShutterflySink(Image.ImageSink):
_name_ = _("Shutterfly")
_description_ = _("Synchronize your Shutterfly photos")
_module_type_ = "sink"
_icon_ = "shutterfly"
_configurable_ = True
def __init__(self, *args):
Image.ImageSink.__init__(self)
self.username = ""
self.password = ""
self.album = ""
self.sapi = None
self.salbum = None
self.sphotos = None
def _get_raw_photo_url(self, photoInfo):
return photoInfo.url
def _get_photo_info(self, id):
if self.sphotos.has_key(id):
return self.sphotos[id]
else:
return None
def _get_photo_formats(self):
return ("image/jpeg", )
def refresh(self):
Image.ImageSink.refresh(self)
self.sapi = Shutterfly(self.username, self.password)
albums = self.sapi.getAlbums()
if not albums.has_key(self.album):
self.salbum = self.sapi.createAlbum(self.album)
else:
self.salbum = albums[self.album]
self.sphotos = self.salbum.getPhotos()
def get_all(self):
return self.sphotos.keys()
def get(self, LUID):
#Image.ImageSink.get(self, LUID)
sphoto = self.sphotos[LUID]
f = Photo.Photo(URI=sphoto.url)
f.set_open_URI(sphoto.url)
f.set_UID(LUID)
return f
def delete(self, LUID):
"""
Delete a photo by ID
Deleting a photo invalidates album length and photo index values.
We must reload the photos (or do something else...)
"""
if not self.sphotos.has_key(LUID):
log.warn("Photo does not exist")
return
try:
self.salbum.deletePhoto(self.sphotos[LUID])
except Exception, e:
raise Exceptions.SyncronizeError("Shutterfly Delete Error - Try Again.")
self.sphotos = self.salbum.getPhotos()
def _upload_photo(self, uploadInfo):
"""
Upload to album
"""
try:
ret = self.salbum.uploadPhoto(uploadInfo.url, uploadInfo.mimeType, uploadInfo.name)
return Rid(ret.id)
except Exception, e:
raise Exceptions.SyncronizeError("Shutterfly Upload Error.")
def configure(self, window):
"""
Configures the ShutterflySink
"""
widget = Utils.dataprovider_glade_get_widget(
__file__,
"shutterfly.glade",
"ShutterflySinkConfigDialog")
# Get configuration widgets
username = widget.get_widget("username")
password = widget.get_widget("password")
album = widget.get_widget("album")
# Load the widgets with presets
username.set_text(self.username)
password.set_text(self.password)
album.set_text(self.album)
dlg = widget.get_widget("ShutterflySinkConfigDialog")
response = Utils.run_dialog(dlg, window)
if response == True:
self.username = username.get_text()
self.password = password.get_text()
self.album = album.get_text()
dlg.destroy()
def get_configuration(self):
return {
"username" : self.username,
"password" : self.password,
"album" : self.album
}
def is_configured(self, isSource, isTwoWay):
if len(self.username) < 1:
return False
if len(self.password) < 1:
return False
if len(self.album) < 1:
return False
return True
def get_UID(self):
return self.username+":"+self.album
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n\"\"\"Boolean algebra module for SymPy\"\"\"\nfrom collections import defaultdict\n\nfrom sympy.core.basic import Basic\nfrom sympy.core.decorators import deprecated\nfrom sympy.core.operations import LatticeOp\nfrom sympy.core.function import ... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n\"\"\"Boolean algebra module for SymPy\"\"\"\nfrom collections import defaultdict\n\nfrom sympy.core.basic import Basic\nfrom sympy.core.decorators import deprecated\nfrom sympy.core.operations import LatticeOp\nfrom sympy.core.... | ```python
"""Boolean algebra module for SymPy"""
from collections import defaultdict
from sympy.core.basic import Basic
from sympy.core.decorators import deprecated
from sympy.core.operations import LatticeOp
from sympy.core.function import Application, sympify
from sympy.core.compatibility import ordered, product
class Boolean(Basic):
    """A boolean object is an object for which logic operations make sense."""
    # no per-instance attributes beyond those of Basic
    __slots__ = []
    def __and__(self, other):
        """Overloading for & operator"""
        return And(self, other)
    def __or__(self, other):
        """Overloading for |"""
        return Or(self, other)
    def __invert__(self):
        """Overloading for ~"""
        return Not(self)
    def __rshift__(self, other):
        """Overloading for >>"""
        return Implies(self, other)
    def __lshift__(self, other):
        """Overloading for <<"""
        return Implies(other, self)
    def __xor__(self, other):
        """Overloading for ^"""
        return Xor(self, other)
class BooleanFunction(Application, Boolean):
    """Boolean function is a function that lives in a boolean space
    It is used as base class for And, Or, Not, etc.
    """
    is_Boolean = True
    def __call__(self, *args):
        # apply the call to every argument and rebuild the same node type
        return self.func(*[arg(*args) for arg in self.args])
    def _eval_simplify(self, ratio, measure):
        # hook invoked by sympy's generic simplify(); delegates to the
        # truth-table based minimizer below
        return simplify_logic(self)
class And(LatticeOp, BooleanFunction):
    """
    Logical AND function.
    It evaluates its arguments in order, giving False immediately
    if any of them are False, and True if they are all True.
    Examples
    ========
    >>> from sympy.core import symbols
    >>> from sympy.abc import x, y
    >>> x & y
    And(x, y)
    """
    # LatticeOp hooks: False absorbs an And, True is its neutral element
    zero = False
    identity = True
class Or(LatticeOp, BooleanFunction):
    """
    Logical OR function
    It evaluates its arguments in order, giving True immediately
    if any of them are True, and False if they are all False.
    """
    # LatticeOp hooks: True absorbs an Or, False is its neutral element
    zero = True
    identity = False
class Xor(BooleanFunction):
    """
    Logical XOR (exclusive OR) function.
    """
    @classmethod
    def eval(cls, *args):
        """
        Logical XOR (exclusive OR) function.
        Returns True if an odd number of the arguments are True
        and the rest are False.
        Returns False if an even number of the arguments are True
        and the rest are False.
        Examples
        ========
        >>> from sympy.logic.boolalg import Xor
        >>> Xor(True, False)
        True
        >>> Xor(True, True)
        False
        >>> Xor(True, False, True, True, False)
        True
        >>> Xor(True, False, True, False)
        False
        """
        # XOR of nothing is False
        if not args:
            return False
        # Fold pairwise from the last operand towards the first, expanding
        # each binary XOR as (a & ~b) | (~a & b).
        operands = list(args)
        result = operands.pop()
        for operand in reversed(operands):
            result = Or(And(result, Not(operand)), And(Not(result), operand))
        return result
class Not(BooleanFunction):
    """
    Logical Not function (negation)
    Note: De Morgan rules applied automatically
    """
    is_Not = True
    @classmethod
    def eval(cls, arg):
        """
        Logical Not function (negation)
        Returns True if the statement is False
        Returns False if the statement is True
        Examples
        ========
        >>> from sympy.logic.boolalg import Not, And, Or
        >>> from sympy.abc import x
        >>> Not(True)
        False
        >>> Not(False)
        True
        >>> Not(And(True, False))
        True
        >>> Not(Or(True, False))
        False
        >>> Not(And(And(True, x), Or(x, False)))
        Not(x)
        """
        if arg in (0, 1):  # includes True and False, too
            return not bool(arg)
        # apply De Morgan Rules
        if arg.func is And:
            return Or(*[Not(a) for a in arg.args])
        if arg.func is Or:
            return And(*[Not(a) for a in arg.args])
        # double-negation elimination
        if arg.func is Not:
            return arg.args[0]
        # otherwise fall through (returns None): Not(arg) stays unevaluated
class Nand(BooleanFunction):
    """
    Logical NAND function.
    It evaluates its arguments in order, giving True immediately if any
    of them are False, and False if they are all True.
    """
    @classmethod
    def eval(cls, *args):
        """
        Logical NAND function.
        Returns True if any of the arguments are False
        Returns False if all arguments are True
        Examples
        ========
        >>> from sympy.logic.boolalg import Nand
        >>> Nand(False, True)
        True
        >>> Nand(True, True)
        False
        """
        # NAND is defined directly as the negation of And
        return Not(And(*args))
class Nor(BooleanFunction):
    """
    Logical NOR function.
    It evaluates its arguments in order, giving False immediately if any
    of them are True, and True if they are all False.
    """
    @classmethod
    def eval(cls, *args):
        """
        Logical NOR function.
        Returns False if any argument is True
        Returns True if all arguments are False
        Examples
        ========
        >>> from sympy.logic.boolalg import Nor
        >>> Nor(True, False)
        False
        >>> Nor(True, True)
        False
        >>> Nor(False, True)
        False
        >>> Nor(False, False)
        True
        """
        # NOR is defined directly as the negation of Or
        return Not(Or(*args))
class Implies(BooleanFunction):
    """
    Logical implication.
    A implies B is equivalent to !A v B
    """
    @classmethod
    def eval(cls, *args):
        """
        Logical implication.
        Accepts two Boolean arguments; A and B.
        Returns False if A is True and B is False
        Returns True otherwise.
        Examples
        ========
        >>> from sympy.logic.boolalg import Implies
        >>> Implies(True, False)
        False
        >>> Implies(False, False)
        True
        >>> Implies(True, True)
        True
        >>> Implies(False, True)
        True
        """
        try:
            A, B = args
        except ValueError:
            raise ValueError(
                "%d operand(s) used for an Implies "
                "(pairs are required): %s" % (len(args), str(args)))
        # with at least one concrete truth value the implication collapses
        # to its material form ~A | B; otherwise stay unevaluated
        if A is True or A is False or B is True or B is False:
            return Or(Not(A), B)
        else:
            return Basic.__new__(cls, *args)
class Equivalent(BooleanFunction):
    """
    Equivalence relation.
    Equivalent(A, B) is True iff A and B are both True or both False
    """
    @classmethod
    def eval(cls, *args):
        """
        Equivalence relation.
        Returns True if all of the arguments are logically equivalent.
        Returns False otherwise.
        Examples
        ========
        >>> from sympy.logic.boolalg import Equivalent, And
        >>> from sympy.abc import x
        >>> Equivalent(False, False, False)
        True
        >>> Equivalent(True, False, False)
        False
        >>> Equivalent(x, And(x, True))
        True
        """
        # duplicates are irrelevant for pairwise equivalence
        argset = set(args)
        if len(argset) <= 1:
            return True
        # a literal True forces all remaining args to be True ...
        if True in argset:
            argset.discard(True)
            return And(*argset)
        # ... and a literal False forces them all to be False
        if False in argset:
            argset.discard(False)
            return Nor(*argset)
        return Basic.__new__(cls, *set(args))
class ITE(BooleanFunction):
    """
    If then else clause.
    """
    @classmethod
    def eval(cls, *args):
        """
        If then else clause
        ITE(A, B, C) evaluates and returns the result of B if A is true
        else it returns the result of C
        Examples
        ========
        >>> from sympy.logic.boolalg import ITE, And, Xor, Or
        >>> from sympy.abc import x, y, z
        >>> x = True
        >>> y = False
        >>> z = True
        >>> ITE(x, y, z)
        False
        >>> ITE(Or(x, y), And(x, z), Xor(z, x))
        True
        """
        # materialize as a list so the error message renders like [a, b, ...]
        args = list(args)
        if len(args) != 3:
            raise ValueError("ITE expects 3 arguments, but got %d: %s" %
                             (len(args), str(args)))
        cond, if_true, if_false = args
        # ITE(c, t, f)  ==  (c & t) | (~c & f)
        return Or(And(cond, if_true), And(Not(cond), if_false))
### end class definitions. Some useful methods
def conjuncts(expr):
    """Return a list of the conjuncts in the expr s.
    Examples
    ========
    >>> from sympy.logic.boolalg import conjuncts
    >>> from sympy.abc import A, B
    >>> conjuncts(A & B)
    frozenset([A, B])
    >>> conjuncts(A | B)
    frozenset([Or(A, B)])
    """
    # LatticeOp.make_args yields the top-level And arguments (a frozenset,
    # per the examples above); a non-And expr becomes a singleton
    return And.make_args(expr)
def disjuncts(expr):
    """Return a list of the disjuncts in the sentence s.
    Examples
    ========
    >>> from sympy.logic.boolalg import disjuncts
    >>> from sympy.abc import A, B
    >>> disjuncts(A | B)
    frozenset([A, B])
    >>> disjuncts(A & B)
    frozenset([And(A, B)])
    """
    # mirror of conjuncts(): top-level Or arguments as a frozenset
    return Or.make_args(expr)
def distribute_and_over_or(expr):
    """
    Given a sentence s consisting of conjunctions and disjunctions
    of literals, return an equivalent sentence in CNF.
    Examples
    ========
    >>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
    >>> from sympy.abc import A, B, C
    >>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
    And(Or(A, Not(B)), Or(A, Not(C)))
    """
    # outer op = And, inner op = Or (see _distribute for the convention)
    return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
    """
    Given a sentence s consisting of conjunctions and disjunctions
    of literals, return an equivalent sentence in DNF.
    Note that the output is NOT simplified.
    Examples
    ========
    >>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
    >>> from sympy.abc import A, B, C
    >>> distribute_or_over_and(And(Or(Not(A), B), C))
    Or(And(B, C), And(C, Not(A)))
    """
    # outer op = Or, inner op = And (see _distribute for the convention)
    return _distribute((expr, Or, And))
def _distribute(info):
    """
    Distributes info[1] over info[2] with respect to info[0].

    ``info`` is a triple ``(expr, outer_op, inner_op)``; e.g. for CNF the
    caller passes ``(expr, And, Or)``.
    """
    if info[0].func is info[2]:
        # expr is an inner_op node: look for one outer_op argument to
        # distribute the remaining siblings over
        for arg in info[0].args:
            if arg.func is info[1]:
                conj = arg
                break
        else:
            # no outer_op argument -> nothing to distribute
            return info[0]
        rest = info[2](*[a for a in info[0].args if a is not conj])
        # pair each child of conj with the rest and recurse
        return info[1](*map(_distribute,
                            [(info[2](c, rest), info[1], info[2]) for c in conj.args]))
    elif info[0].func is info[1]:
        # expr already an outer_op node: distribute each argument independently
        return info[1](*map(_distribute,
                            [(x, info[1], info[2]) for x in info[0].args]))
    else:
        # atom or other node: already in the required form
        return info[0]
def to_cnf(expr, simplify=False):
    """
    Convert a propositional logical sentence s to conjunctive normal form.
    That is, of the form ((A | ~B | ...) & (B | C | ...) & ...)

    If ``simplify`` is True the expression is first minimized with
    :func:`simplify_logic` and the shorter of the two conversions is
    returned.

    Examples
    ========
    >>> from sympy.logic.boolalg import to_cnf
    >>> from sympy.abc import A, B, D
    >>> to_cnf(~(A | B) | D)
    And(Or(D, Not(A)), Or(D, Not(B)))
    """
    expr = sympify(expr)
    if not isinstance(expr, BooleanFunction):
        return expr
    if simplify:
        simplified_expr = distribute_and_over_or(simplify_logic(expr))
        # compute the plain conversion once (it was previously evaluated
        # twice: in the comparison and again in the else branch)
        plain_expr = to_cnf(expr)
        if len(simplified_expr.args) < len(plain_expr.args):
            return simplified_expr
        return plain_expr
    # Don't convert unless we have to
    if is_cnf(expr):
        return expr
    expr = eliminate_implications(expr)
    return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
    """
    Convert a propositional logical sentence s to disjunctive normal form.
    That is, of the form ((A & ~B & ...) | (B & C & ...) | ...)

    If ``simplify`` is True the expression is first minimized with
    :func:`simplify_logic` and the shorter of the two conversions is
    returned.

    Examples
    ========
    >>> from sympy.logic.boolalg import to_dnf
    >>> from sympy.abc import A, B, C, D
    >>> to_dnf(B & (A | C))
    Or(And(A, B), And(B, C))
    """
    expr = sympify(expr)
    if not isinstance(expr, BooleanFunction):
        return expr
    if simplify:
        simplified_expr = distribute_or_over_and(simplify_logic(expr))
        # compute the plain conversion once (it was previously evaluated
        # twice: in the comparison and again in the else branch)
        plain_expr = to_dnf(expr)
        if len(simplified_expr.args) < len(plain_expr.args):
            return simplified_expr
        return plain_expr
    # Don't convert unless we have to
    if is_dnf(expr):
        return expr
    expr = eliminate_implications(expr)
    return distribute_or_over_and(expr)
def is_cnf(expr):
    """
    Test whether or not an expression is in conjunctive normal form.
    Examples
    ========
    >>> from sympy.logic.boolalg import is_cnf
    >>> from sympy.abc import A, B, C
    >>> is_cnf(A | B | C)
    True
    >>> is_cnf(A & B & C)
    True
    >>> is_cnf((A & B) | C)
    False
    """
    # CNF: an And of Ors of literals
    return _is_form(expr, And, Or)
def is_dnf(expr):
    """
    Test whether or not an expression is in disjunctive normal form.
    Examples
    ========
    >>> from sympy.logic.boolalg import is_dnf
    >>> from sympy.abc import A, B, C
    >>> is_dnf(A | B | C)
    True
    >>> is_dnf(A & B & C)
    True
    >>> is_dnf((A & B) | C)
    True
    >>> is_dnf(A & (B | C))
    False
    """
    # DNF: an Or of Ands of literals
    return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
    """
    Test whether or not an expression is of the required form.

    ``function1`` is the expected outer operator and ``function2`` the
    expected inner operator (And/Or for CNF, Or/And for DNF).
    """
    expr = sympify(expr)
    # Special case of an Atom
    if expr.is_Atom:
        return True
    # Special case of a single expression of function2
    # (e.g. a lone Or-clause is valid CNF)
    if expr.func is function2:
        for lit in expr.args:
            if lit.func is Not:
                if not lit.args[0].is_Atom:
                    return False
            else:
                if not lit.is_Atom:
                    return False
        return True
    # Special case of a single negation
    if expr.func is Not:
        if not expr.args[0].is_Atom:
            return False
    if expr.func is not function1:
        return False
    # general case: every argument must be a literal or a function2
    # clause made of literals
    for cls in expr.args:
        if cls.is_Atom:
            continue
        if cls.func is Not:
            if not cls.args[0].is_Atom:
                return False
        elif cls.func is not function2:
            return False
        for lit in cls.args:
            if lit.func is Not:
                if not lit.args[0].is_Atom:
                    return False
            else:
                if not lit.is_Atom:
                    return False
    return True
def eliminate_implications(expr):
    """
    Change >>, <<, and Equivalent into &, |, and ~. That is, return an
    expression that is equivalent to s, but has only &, |, and ~ as logical
    operators.
    Examples
    ========
    >>> from sympy.logic.boolalg import Implies, Equivalent, \
        eliminate_implications
    >>> from sympy.abc import A, B, C
    >>> eliminate_implications(Implies(A, B))
    Or(B, Not(A))
    >>> eliminate_implications(Equivalent(A, B))
    And(Or(A, Not(B)), Or(B, Not(A)))
    """
    expr = sympify(expr)
    if expr.is_Atom:
        return expr  # (Atoms are unchanged.)
    # Python 2: map() returns a list here; it is indexed below
    args = map(eliminate_implications, expr.args)
    if expr.func is Implies:
        a, b = args[0], args[-1]
        return (~a) | b
    elif expr.func is Equivalent:
        a, b = args[0], args[-1]
        return (a | Not(b)) & (b | Not(a))
    else:
        # rebuild the node from the recursively-converted arguments
        return expr.func(*args)
@deprecated(
    useinstead="sympify", issue=3451, deprecated_since_version="0.7.3")
def compile_rule(s):
    """
    Transforms a rule into a SymPy expression
    A rule is a string of the form "symbol1 & symbol2 | ..."
    Note: This function is deprecated. Use sympify() instead.
    """
    import re
    # wrap every identifier in Symbol("...") before sympifying, so bare
    # names in the rule string become SymPy symbols
    return sympify(re.sub(r'([a-zA-Z_][a-zA-Z0-9_]*)', r'Symbol("\1")', s))
def to_int_repr(clauses, symbols):
    """
    Takes clauses in CNF format and puts them into an integer representation.

    Positive literals map to their (1-based) index in ``symbols``; negated
    literals map to the negative of that index (DIMACS-style).
    Examples
    ========
    >>> from sympy.logic.boolalg import to_int_repr
    >>> from sympy.abc import x, y
    >>> to_int_repr([x | y, y], [x, y]) == [set([1, 2]), set([2])]
    True
    """
    # Convert the symbol list into a dict
    # (Python 2: xrange; indices are 1-based so negation stays meaningful)
    symbols = dict(zip(symbols, xrange(1, len(symbols) + 1)))
    def append_symbol(arg, symbols):
        # Not(x) -> -index(x); plain symbol -> +index
        if arg.func is Not:
            return -symbols[arg.args[0]]
        else:
            return symbols[arg]
    return [set(append_symbol(arg, symbols) for arg in Or.make_args(c))
            for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
    """
    Converts a term in the expansion of a function from binary to it's
    variable form (for SOP).
    """
    # bit 1 -> the variable itself, bit 0 -> its negation,
    # anything else (the "don't care" marker 3) is dropped
    literals = [variables[i] if bit == 1 else Not(variables[i])
                for i, bit in enumerate(minterm) if bit in (0, 1)]
    return And(*literals)
def _convert_to_varsPOS(maxterm, variables):
    """
    Converts a term in the expansion of a function from binary to it's
    variable form (for POS).
    """
    # bit 1 -> the negated variable, bit 0 -> the variable itself,
    # anything else (the "don't care" marker 3) is dropped
    literals = [Not(variables[i]) if bit == 1 else variables[i]
                for i, bit in enumerate(maxterm) if bit in (0, 1)]
    return Or(*literals)
def _simplified_pairs(terms):
    """
    Reduces a set of minterms, if possible, to a simplified set of minterms
    with one less variable in the terms using QM method.
    """
    simplified_terms = []
    # Python 2: range() returns a list; entries are overwritten with None
    # below to mark terms that were merged into a simpler one
    todo = range(len(terms))
    for i, ti in enumerate(terms[:-1]):
        for j_i, tj in enumerate(terms[(i + 1):]):
            index = _check_pair(ti, tj)
            if index != -1:
                # ti and tj differ in exactly one position: merge them,
                # marking the differing bit with the "don't care" value 3
                todo[i] = todo[j_i + i + 1] = None
                newterm = ti[:]
                newterm[index] = 3
                if newterm not in simplified_terms:
                    simplified_terms.append(newterm)
    # keep every term that was never merged with another one
    simplified_terms.extend(
        [terms[i] for i in filter(lambda _: _ is not None, todo)])
    return simplified_terms
def _compare_term(minterm, term):
"""
Return True if a binary term is satisfied by the given term. Used
for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x != 3 and x != minterm[i]:
return False
return True
def _rem_redundancy(l1, terms):
    """
    After the truth table has been sufficiently simplified, use the prime
    implicant table method to recognize and eliminate redundant pairs,
    and return the essential arguments.

    ``l1`` holds the candidate (prime) implicants, ``terms`` the original
    minterms that must all be covered.
    """
    essential = []
    # first pass: an implicant that is the *only* cover of some minterm
    # is essential
    for x in terms:
        temporary = []
        for y in l1:
            if _compare_term(x, y):
                temporary.append(y)
        if len(temporary) == 1:
            if temporary[0] not in essential:
                essential.append(temporary[0])
    # second pass: for minterms not yet covered by an essential implicant,
    # greedily add the first implicant that covers them
    for x in terms:
        for y in essential:
            if _compare_term(x, y):
                break
        else:
            for z in l1:
                if _compare_term(x, z):
                    if z not in essential:
                        essential.append(z)
                    break
    return essential
def SOPform(variables, minterms, dontcares=None):
    """
    The SOPform function uses simplified_pairs and a redundant group-
    eliminating algorithm to convert the list of all input combos that
    generate '1' (the minterms) into the smallest Sum of Products form.
    The variables must be given as the first argument.
    Return a logical Or function (i.e., the "sum of products" or "SOP"
    form) that gives the desired outcome. If there are inputs that can
    be ignored, pass them as a list, too.
    The result will be one of the (perhaps many) functions that satisfy
    the conditions.
    Examples
    ========
    >>> from sympy.logic import SOPform
    >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
    ...             [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
    >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
    >>> SOPform(['w','x','y','z'], minterms, dontcares)
    Or(And(Not(w), z), And(y, z))
    References
    ==========
    .. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
    """
    from sympy.core.symbol import Symbol
    # accept plain strings as variable names
    variables = [Symbol(v) if not isinstance(v, Symbol) else v
                 for v in variables]
    if minterms == []:
        return False
    minterms = [list(i) for i in minterms]
    dontcares = [list(i) for i in (dontcares or [])]
    for d in dontcares:
        if d in minterms:
            raise ValueError('%s in minterms is also in dontcares' % d)
    # iterate Quine-McCluskey pair merging to a fixed point
    old = None
    new = minterms + dontcares
    while new != old:
        old = new
        new = _simplified_pairs(old)
    # keep only the implicants needed to cover the actual minterms
    essential = _rem_redundancy(new, minterms)
    return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
    """
    The POSform function uses simplified_pairs and a redundant-group
    eliminating algorithm to convert the list of all input combinations
    that generate '1' (the minterms) into the smallest Product of Sums form.
    The variables must be given as the first argument.
    Return a logical And function (i.e., the "product of sums" or "POS"
    form) that gives the desired outcome. If there are inputs that can
    be ignored, pass them as a list, too.
    The result will be one of the (perhaps many) functions that satisfy
    the conditions.
    Examples
    ========
    >>> from sympy.logic import POSform
    >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
    ...             [1, 0, 1, 1], [1, 1, 1, 1]]
    >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
    >>> POSform(['w','x','y','z'], minterms, dontcares)
    And(Or(Not(w), y), z)
    References
    ==========
    .. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
    """
    from sympy.core.symbol import Symbol
    # accept plain strings as variable names
    variables = [Symbol(v) if not isinstance(v, Symbol) else v
                 for v in variables]
    if minterms == []:
        return False
    minterms = [list(i) for i in minterms]
    dontcares = [list(i) for i in (dontcares or [])]
    for d in dontcares:
        if d in minterms:
            raise ValueError('%s in minterms is also in dontcares' % d)
    # the maxterms are every row of the truth table that is neither a
    # minterm nor a don't-care
    maxterms = []
    for t in product([0, 1], repeat=len(variables)):
        t = list(t)
        if (t not in minterms) and (t not in dontcares):
            maxterms.append(t)
    # iterate Quine-McCluskey pair merging to a fixed point
    old = None
    new = maxterms + dontcares
    while new != old:
        old = new
        new = _simplified_pairs(old)
    essential = _rem_redundancy(new, maxterms)
    return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def simplify_logic(expr):
    """
    This function simplifies a boolean function to its
    simplified version in SOP or POS form. The return type is an
    Or or And object in SymPy. The input can be a string or a boolean
    expression.
    Examples
    ========
    >>> from sympy.logic import simplify_logic
    >>> from sympy.abc import x, y, z
    >>> from sympy import S
    >>> b = '(~x & ~y & ~z) | ( ~x & ~y & z)'
    >>> simplify_logic(b)
    And(Not(x), Not(y))
    >>> S(b)
    Or(And(Not(x), Not(y), Not(z)), And(Not(x), Not(y), z))
    >>> simplify_logic(_)
    And(Not(x), Not(y))
    """
    expr = sympify(expr)
    if not isinstance(expr, BooleanFunction):
        return expr
    variables = list(expr.free_symbols)
    # enumerate the full truth table and collect the rows where expr is True
    truthtable = []
    for t in product([0, 1], repeat=len(variables)):
        t = list(t)
        if expr.subs(zip(variables, t)) == True:
            truthtable.append(t)
    # heuristic: with at least half the table true, minimize via SOP,
    # otherwise via POS
    if (len(truthtable) >= (2 ** (len(variables) - 1))):
        return SOPform(variables, truthtable)
    else:
        return POSform(variables, truthtable)
def _finger(eq):
    """
    Assign a 5-item fingerprint to each symbol in the equation:
    [
    # of times it appeared as a Symbol,
    # of times it appeared as a Not(symbol),
    # of times it appeared as a Symbol in an And or Or,
    # of times it appeared as a Not(Symbol) in an And or Or,
    sum of the number of arguments with which it appeared,
    counting Symbol as 1 and Not(Symbol) as 2
    ]
    >>> from sympy.logic.boolalg import _finger as finger
    >>> from sympy import And, Or, Not
    >>> from sympy.abc import a, b, x, y
    >>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
    >>> dict(finger(eq))
    {(0, 0, 1, 0, 2): [x], (0, 0, 1, 0, 3): [a, b], (0, 0, 1, 2, 8): [y]}
    So y and x have unique fingerprints, but a and b do not.
    """
    f = eq.free_symbols
    d = dict(zip(f, [[0] * 5 for fi in f]))
    # only the *top-level* arguments of eq are inspected
    for a in eq.args:
        if a.is_Symbol:
            d[a][0] += 1
        elif a.is_Not:
            d[a.args[0]][1] += 1
        else:
            # a compound (And/Or) argument: weight by its "size", counting
            # a negated symbol twice
            o = len(a.args) + sum(ai.func is Not for ai in a.args)
            for ai in a.args:
                if ai.is_Symbol:
                    d[ai][2] += 1
                    d[ai][-1] += o
                else:
                    d[ai.args[0]][3] += 1
                    d[ai.args[0]][-1] += o
    # invert: fingerprint tuple -> list of symbols carrying it
    # (Python 2: dict.iteritems())
    inv = defaultdict(list)
    for k, v in ordered(d.iteritems()):
        inv[tuple(v)].append(k)
    return inv
def bool_equal(bool1, bool2, info=False):
    """Return True if the two expressions represent the same logical
    behavior for some correspondence between the variables of each
    (which may be different). For example, And(x, y) is logically
    equivalent to And(a, b) for {x: a, y: b} (or vice versa). If the
    mapping is desired, then set ``info`` to True and the simplified
    form of the functions and the mapping of variables will be
    returned.
    Examples
    ========
    >>> from sympy import SOPform, bool_equal, Or, And, Not, Xor
    >>> from sympy.abc import w, x, y, z, a, b, c, d
    >>> function1 = SOPform(['x','z','y'],[[1, 0, 1], [0, 0, 1]])
    >>> function2 = SOPform(['a','b','c'],[[1, 0, 1], [1, 0, 0]])
    >>> bool_equal(function1, function2, info=True)
    (And(Not(z), y), {y: a, z: b})
    The results are not necessarily unique, but they are canonical. Here,
    ``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
    >>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
    >>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
    >>> bool_equal(eq, eq2)
    True
    >>> bool_equal(eq, eq2, info=True)
    (Or(And(Not(y), w), And(Not(y), z), And(x, y)), {w: a, x: b, y: c, z: d})
    >>> eq = And(Xor(a, b), c, And(c,d))
    >>> bool_equal(eq, eq.subs(c, x), info=True)
    (And(Or(Not(a), Not(b)), Or(a, b), c, d), {a: a, b: b, c: d, d: x})
    """
    def match(function1, function2):
        """Return the mapping that equates variables between two
        simplified boolean expressions if possible.
        By "simplified" we mean that a function has been denested
        and is either an And (or an Or) whose arguments are either
        symbols (x), negated symbols (Not(x)), or Or (or an And) whose
        arguments are only symbols or negated symbols. For example,
        And(x, Not(y), Or(w, Not(z))).
        Basic.match is not robust enough (see issue 1736) so this is
        a workaround that is valid for simplified boolean expressions
        """
        # do some quick checks
        if function1.__class__ != function2.__class__:
            return None
        if len(function1.args) != len(function2.args):
            return None
        if function1.is_Symbol:
            return {function1: function2}
        # get the fingerprint dictionaries (see _finger)
        f1 = _finger(function1)
        f2 = _finger(function2)
        # more quick checks
        if len(f1) != len(f2):
            return False
        # assemble the match dictionary if possible: symbols with the same
        # fingerprint are paired up positionally (canonical ordering comes
        # from _finger's use of ordered())
        matchdict = {}
        for k in f1.keys():
            if k not in f2:
                return False
            if len(f1[k]) != len(f2[k]):
                return False
            for i, x in enumerate(f1[k]):
                matchdict[x] = f2[k][i]
        return matchdict
    # compare the canonical simplified forms
    a = simplify_logic(bool1)
    b = simplify_logic(bool2)
    m = match(a, b)
    if m and info:
        return a, m
    return m is not None
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret k... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: k... | ```python
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
## Grappelli configuration
GRAPPELLI_ADMIN_TITLE = "Radius Administration"
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'radauth',
'radacct',
'radclient',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.request',
#)
ROOT_URLCONF = 'radius.urls'
WSGI_APPLICATION = 'radius.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'NAME': '{{ mysql_db }}',
'USER': '{{ mysql_user }}',
'PASSWORD': '{{ mysql_password }}',
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'null': {
'class':'logging.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join("{{ logdir_path }}", "netapi.log"),
'formatter': 'verbose',
'backupCount': 20,
'maxBytes': 1024*1024*20, # 20 MB
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.request': {
'handlers': ['logfile', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console', 'logfile'],
'level': 'WARNING',
'propagate': False,
},
'networkapi': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
},
'celery': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
},
'celery.tasks': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
},
'py.warnings': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
}
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'GMT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '{{ static_root }}'
X_FRAME_OPTIONS='DENY'
SECURE_CONTENT_TYPE_NOSNIFF=True
SECURE_BROWSER_XSS_FILTER=True
#SECURE_SSL_REDIRECT=True
SESSION_COOKIE_SECURE=True
CSRF_COOKIE_SECURE=True
CSRF_COOKIE_HTTPONLY=True
``` |
[
{
"content": "Here is the snippet:\n```python\nimport theano.tensor as T\n\n\ndef cca_loss(outdim_size, use_all_singular_values):\n \"\"\"\n The main loss function (inner_cca_objective) is wrapped in this function due to\n the constraints imposed by Keras on objective functions\n \"\"\"\n def inn... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\nimport theano.tensor as T\n\n\ndef cca_loss(outdim_size, use_all_singular_values):\n \"\"\"\n The main loss function (inner_cca_objective) is wrapped in this function due to\n the constraints imposed by Keras on objective functions\n \"\... | ```python
import theano.tensor as T
def cca_loss(outdim_size, use_all_singular_values):
"""
The main loss function (inner_cca_objective) is wrapped in this function due to
the constraints imposed by Keras on objective functions
"""
def inner_cca_objective(y_true, y_pred):
"""
It is the loss function of CCA as introduced in the original paper. There can be other formulations.
It is implemented by Theano tensor operations, and does not work on Tensorflow backend
y_true is just ignored
"""
r1 = 1e-4
r2 = 1e-4
eps = 1e-12
o1 = o2 = y_pred.shape[1]//2
# unpack (separate) the output of networks for view 1 and view 2
H1 = y_pred[:, 0:o1].T
H2 = y_pred[:, o1:o1+o2].T
m = H1.shape[1]
H1bar = H1 - (1.0 / m) * T.dot(H1, T.ones([m, m]))
H2bar = H2 - (1.0 / m) * T.dot(H2, T.ones([m, m]))
SigmaHat12 = (1.0 / (m - 1)) * T.dot(H1bar, H2bar.T)
SigmaHat11 = (1.0 / (m - 1)) * T.dot(H1bar, H1bar.T) + r1 * T.eye(o1)
SigmaHat22 = (1.0 / (m - 1)) * T.dot(H2bar, H2bar.T) + r2 * T.eye(o2)
# Calculating the root inverse of covariance matrices by using eigen decomposition
[D1, V1] = T.nlinalg.eigh(SigmaHat11)
[D2, V2] = T.nlinalg.eigh(SigmaHat22)
# Added to increase stability
posInd1 = T.gt(D1, eps).nonzero()[0]
D1 = D1[posInd1]
V1 = V1[:, posInd1]
posInd2 = T.gt(D2, eps).nonzero()[0]
D2 = D2[posInd2]
V2 = V2[:, posInd2]
SigmaHat11RootInv = T.dot(T.dot(V1, T.nlinalg.diag(D1 ** -0.5)), V1.T)
SigmaHat22RootInv = T.dot(T.dot(V2, T.nlinalg.diag(D2 ** -0.5)), V2.T)
Tval = T.dot(T.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)
if use_all_singular_values:
# all singular values are used to calculate the correlation
corr = T.sqrt(T.nlinalg.trace(T.dot(Tval.T, Tval)))
else:
# just the top outdim_size singular values are used
[U, V] = T.nlinalg.eigh(T.dot(Tval.T, Tval))
U = U[T.gt(U, eps).nonzero()[0]]
U = U.sort()
corr = T.sum(T.sqrt(U[0:outdim_size]))
return -corr
return inner_cca_objective
``` |
[
{
"content": "```python\nfrom urllib.parse import urlparse\n\nfrom etherealpost.db.filters import JinjaFilters\nfrom pymongo import MongoClient\nfrom pyramid.config import Configurator\nfrom pyramid.httpexceptions import HTTPMovedPermanently\n\n\n# http://stackoverflow.com/a/15705778\ndef add_auto_route(config,... | [
{
"content": "<|memory_start|>```python\nfrom urllib.parse import urlparse\n\nfrom etherealpost.db.filters import JinjaFilters\nfrom pymongo import MongoClient\nfrom pyramid.config import Configurator\nfrom pyramid.httpexceptions import HTTPMovedPermanently\n\n\n# http://stackoverflow.com/a/15705778\ndef add_au... | ```python
from urllib.parse import urlparse
from etherealpost.db.filters import JinjaFilters
from pymongo import MongoClient
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPMovedPermanently
# http://stackoverflow.com/a/15705778
def add_auto_route(config, name, pattern, **kw):
config.add_route(name, pattern, **kw)
if not pattern.endswith('/'):
config.add_route(name + '_auto', pattern + '/')
def redirector(request):
return HTTPMovedPermanently(
request.route_url(name, _query=request.GET,
**request.matchdict))
config.add_view(redirector, route_name=name + '_auto')
import os
from pyramid.response import FileResponse
def favicon_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'favicon.ico')
return FileResponse(icon, request=request)
def robots_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'robots.txt')
return FileResponse(icon, request=request)
def sitemap_view(request):
here = os.path.dirname(__file__)
icon = os.path.join(here, 'assets', 'sitemap.xml')
return FileResponse(icon, request=request)
def main(global_config, **settings):
config = Configurator(settings=settings)
# Includes
config.include('pyramid_jinja2')
config.commit()
config.include('pyramid_scss')
# Database
db_url = urlparse(settings['mongo_uri'])
config.registry.db = MongoClient(host=db_url.hostname,
port=db_url.port)
def add_db(request):
db = config.registry.db[db_url.path[1:]]
if db_url.username and db_url.password:
db.authenticate(db_url.username, db_url.password)
return db
config.add_request_method(add_db, 'db', reify=True)
# Jinja Filters
jinja2_env = config.get_jinja2_environment()
# and Stripe and Paypal
jinja2_env.globals['stripe_pk'] = settings['stripe_pk']
jinja2_env.globals['paypal_id'] = settings['paypal_id']
jinja2_env.globals['btc_address'] = settings['btc_address']
jf = JinjaFilters(db=config.registry.db)
jf.set_filters(jinja2_env)
# Routing
add_auto_route(config, 'home', '/')
add_auto_route(config, 'stripe_charge', '/charge')
add_auto_route(config, 'about', '/about')
add_auto_route(config, 'region_switch', '{region:(us|eu)}')
add_auto_route(config, 'realm', '{region:(us|eu)}/{realm_slug}')
add_auto_route(config, 'item',
'{region:(us|eu)}/{realm_slug}/item/{item:\d+}')
add_auto_route(config, 'seller',
'{region:(us|eu)}/{realm_slug}/seller/{seller}')
# Static Files
config.add_route('favicon', '/favicon.ico')
config.add_route('robots', '/robots.txt')
config.add_route('sitemap', '/sitemap.xml')
config.add_view(view='etherealpost.favicon_view', route_name='favicon')
config.add_view(view='etherealpost.robots_view', route_name='robots')
config.add_view(view='etherealpost.sitemap_view', route_name='sitemap')
config.add_route('css', '/assets/css/{css_path:.*}.css')
config.add_view(route_name='css', view='pyramid_scss.controller.get_scss',
renderer='scss', request_method='GET')
config.add_static_view(name='assets', path='etherealpost:assets')
# Scan things
config.scan('.views')
# Here we gooooo!
return config.make_wsgi_app()
``` |
[
{
"content": "Write the code verbatim:\n```python\n# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.\n# Copyright (C) NIWA & British Crown (Met Office) & Contributors.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published b... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.\n# Copyright (C) NIWA & British Crown (Met Office) & Contributors.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public Licens... | ```python
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
"""It should produce a GraphQL mutation with the args filled in."""
arg_types = {
'foo': 'String!',
'bar': '[Int]'
}
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
['foo', 'bar']
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
result
}
}
'''
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n\"\"\"engine.SCons.Platform.hpux\n\nPlatform-specific initialization for HP-UX systems.\n\nThere normally shouldn't be any need to import this module directly. It\nwill usually be imported through the generic SCons.Platform.Platform()\nsele... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n\"\"\"engine.SCons.Platform.hpux\n\nPlatform-specific initialization for HP-UX systems.\n\nThere normally shouldn't be any need to import this module directly. It\nwill usually be imported through the generic SCons.Platform.... | ```python
"""engine.SCons.Platform.hpux
Platform-specific initialization for HP-UX systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/hpux.py 2009/09/04 16:33:07 david"
import posix
def generate(env):
posix.generate(env)
#Based on HP-UX11i: ARG_MAX=2048000 - 3000 for environment expansion
env['MAXLINELENGTH'] = 2045000
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# powered by Can... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.\n#\n# This file is part of Navitia,\n# the software to build cool stuff with public transport.\n#\n# Hope you'll enjoy and contribute to this project,\n# ... | ```python
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from navitiacommon import response_pb2
from operator import attrgetter
def has_walking_first(journey):
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return True
elif section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Walking:
return False
elif section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Walking:
return False
return True
def has_bike_first(journey):
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return True
elif section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Bike:
return False
elif section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Bike:
return False
return True
def has_bss_first(journey):
has_bss = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return False
elif section.type == response_pb2.BSS_RENT:
return True
return False
def has_walking_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt \
and section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Walking:
return False
elif has_pt \
and section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Walking:
return False
return has_pt#we will not be here if there is another fallback mode used after the pt section
def has_bike_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt \
and section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Bike:
return False
elif has_pt \
and section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Bike:
return False
return has_pt#we will not be here if there is another fallback mode used after the pt section
def has_bss_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt and section.type == response_pb2.BSS_RENT:
return True
return False
def has_bss_first_and_walking_last(journey):
return has_bss_first(journey) and has_walking_last(journey)
def has_walking_first_and_bss_last(journey):
return has_walking_first(journey) and has_bss_last(journey)
def has_bss_first_and_bss_last(journey):
return has_bss_first(journey) and has_bss_last(journey)
def has_bike_first_and_walking_last(journey):
return has_bike_first(journey) and has_walking_last(journey)
def has_bike_first_and_bss_last(journey):
return has_bike_first(journey) and has_bss_last(journey)
def bike_duration(journey):
duration = 0
in_bss = False
for section in journey.sections:
if section.type == response_pb2.BSS_RENT:
in_bss = True
if section.type == response_pb2.BSS_PUT_BACK:
in_bss = False
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Bike \
and not in_bss:
duration = duration + section.duration
return duration
def bss_duration(journey):
duration = 0
in_bss = False
for section in journey.sections:
if section.type == response_pb2.BSS_RENT:
in_bss = True
duration += section.duration
if section.type == response_pb2.BSS_PUT_BACK:
in_bss = False
duration += section.duration
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Bike \
and in_bss:
duration = duration + section.duration
return duration
def car_duration(journey):
duration = 0
for section in journey.sections:
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Car:
duration = duration + section.duration
return duration
def walking_duration(journey):
duration = 0
for section in journey.sections:
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Walking:
duration = duration + section.duration
return duration
def pt_duration(journey):
duration = 0
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
duration = duration + section.duration
return duration
def is_non_pt_bss(journey):
return journey.type == 'non_pt_bss'
def is_non_pt_walk(journey):
return journey.type == 'non_pt_walk'
def is_non_pt_bike(journey):
return journey.type == 'non_pt_bike'
max_duration_fallback_modes = {'walking': [response_pb2.Walking],
'bss': [response_pb2.Walking, response_pb2.Bss],
'bike': [response_pb2.Walking, response_pb2.Bss, response_pb2.Bike],
'car': [response_pb2.Walking, response_pb2.Bss, response_pb2.Bike, response_pb2.Car],
}
def filter_journeys_by_fallback_modes(journeys, fallback_modes):
section_is_fallback_or_pt = lambda section: section.type not in \
(response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
or section.street_network.mode in fallback_modes
filter_journey = lambda journey: all(section_is_fallback_or_pt(section) for section in journey.sections) \
and journey.duration > 0
return filter(filter_journey, journeys)
def select_best_journey_by_time(journeys, clockwise, fallback_modes):
list_journeys = filter_journeys_by_fallback_modes(journeys, fallback_modes)
if not list_journeys:
return None
if clockwise:
return min(list_journeys, key=attrgetter('arrival_date_time'))
else:
return max(list_journeys, key=attrgetter('departure_date_time'))
def select_best_journey_by_duration(journeys, clockwise, fallback_modes):
list_journeys = filter_journeys_by_fallback_modes(journeys, fallback_modes)
if not list_journeys:
return None
return min(list_journeys, key=attrgetter('duration'))
fallback_mode_order = ['walking', 'bss', 'bike', 'car']
def fallback_mode_comparator(a, b):
return fallback_mode_order.index(a) - fallback_mode_order.index(b)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n'''\nCreated on 27 lip 2014\n\n@author: Krzysztof Langner\n'''\n\nfrom mewa.client import Connection\n\n\nHOST_URL = \"ws://mewa.cc:9001/ws\"\n# HOST_URL = \"ws://localhost:9000/ws\"\n\nconnection = Connection(HOST_URL)\n\ndef onConnec... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n'''\nCreated on 27 lip 2014\n\n@author: Krzysztof Langner\n'''\n\nfrom mewa.client import Connection\n\n\nHOST_URL = \"ws://mewa.cc:9001/ws\"\n# HOST_URL = \"ws://localhost:9000/ws\"\n\nconnection = Connection(HOST_URL)... | ```python
'''
Created on 27 lip 2014
@author: Krzysztof Langner
'''
from mewa.client import Connection
HOST_URL = "ws://mewa.cc:9001/ws"
# HOST_URL = "ws://localhost:9000/ws"
connection = Connection(HOST_URL)
def onConnected():
connection.getDevices()
connection.sendEvent("serviceA.event2", "78", True)
params = [{"type": "org.fi24.switch", "name": "switch2"}, {"type": "org.fi24.switch", "name": "switch1"}, {"type": "org.fi24.switch", "name": "switch0"}]
connection.sendMessage("device66", "serviceA.level", params)
def onEvent(timestamp, fromDevice, eventId, params):
print("received event %s from %s with params %s" % (eventId, fromDevice, params))
def onMessage(timestamp, fromDevice, msgId, params):
print(timestamp + ": received message %s from %s with params %s" % (timestamp, msgId, fromDevice, params))
def onDevicesEvent(timestamp, devices):
print(timestamp + ": Found devices:")
print(devices)
def onError(reason):
print("Error: " + reason)
def onAck():
print("ACK")
if __name__ == "__main__":
connection.onConnected = onConnected
connection.onEvent = onEvent
connection.onMessage = onMessage
connection.onDevicesEvent = onDevicesEvent
connection.onError = onError
connection.onAck = onAck
connection.connect("admin.test", "python", "l631vxqa", [""])
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy ... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You ma... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.service_usage_v1.services.service_usage.client import (
ServiceUsageClient,
)
from google.cloud.service_usage_v1.services.service_usage.async_client import (
ServiceUsageAsyncClient,
)
from google.cloud.service_usage_v1.types.resources import OperationMetadata
from google.cloud.service_usage_v1.types.resources import Service
from google.cloud.service_usage_v1.types.resources import ServiceConfig
from google.cloud.service_usage_v1.types.resources import State
from google.cloud.service_usage_v1.types.serviceusage import BatchEnableServicesRequest
from google.cloud.service_usage_v1.types.serviceusage import BatchEnableServicesResponse
from google.cloud.service_usage_v1.types.serviceusage import BatchGetServicesRequest
from google.cloud.service_usage_v1.types.serviceusage import BatchGetServicesResponse
from google.cloud.service_usage_v1.types.serviceusage import DisableServiceRequest
from google.cloud.service_usage_v1.types.serviceusage import DisableServiceResponse
from google.cloud.service_usage_v1.types.serviceusage import EnableServiceRequest
from google.cloud.service_usage_v1.types.serviceusage import EnableServiceResponse
from google.cloud.service_usage_v1.types.serviceusage import GetServiceRequest
from google.cloud.service_usage_v1.types.serviceusage import ListServicesRequest
from google.cloud.service_usage_v1.types.serviceusage import ListServicesResponse
__all__ = (
"ServiceUsageClient",
"ServiceUsageAsyncClient",
"OperationMetadata",
"Service",
"ServiceConfig",
"State",
"BatchEnableServicesRequest",
"BatchEnableServicesResponse",
"BatchGetServicesRequest",
"BatchGetServicesResponse",
"DisableServiceRequest",
"DisableServiceResponse",
"EnableServiceRequest",
"EnableServiceResponse",
"GetServiceRequest",
"ListServicesRequest",
"ListServicesResponse",
)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport re\nimport os\nimport unicodedata\nimport ConfigParser\nfrom bson import ObjectId\nfrom datetime import datetime\n\nfrom mining.settings import PROJECT_PATH\n\n\ndef slugfy(tex... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport re\nimport os\nimport unicodedata\nimport ConfigParser\nfrom bson import ObjectId\nfrom datetime import datetime\n\nfrom mining.settings import PROJECT_PATH\n\n... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import re
import os
import unicodedata
import ConfigParser
from bson import ObjectId
from datetime import datetime
from mining.settings import PROJECT_PATH
def slugfy(text):
slug = unicodedata.normalize("NFKD", text).encode("UTF-8", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug:
return None
return slug
def conf(section, ini="mining.ini"):
config = ConfigParser.ConfigParser()
config.read(os.path.join(PROJECT_PATH, ini))
_dict = {}
options = config.options(section)
for option in options:
try:
_dict[option] = config.get(section, option)
except:
_dict[option] = None
if 'sql_conn_params' in options:
import ast
_dict['sql_conn_params'] = ast.literal_eval(_dict['sql_conn_params'])
else:
_dict['sql_conn_params'] = {}
return _dict
def log_it(s, name=u"core"):
with open("/tmp/openmining-{}.log".format(name), "a") as log:
msg = u"{} => {}\n".format(datetime.now(), s)
log.write(msg.encode('utf-8'))
def parse_dumps(obj):
if isinstance(obj, datetime):
return str(obj.strftime("%Y-%m-%d %H:%M:%S"))
if isinstance(obj, ObjectId):
return str(obj)
return json.JSONEncoder.default(obj)
def __from__(path):
try:
_import = path.split('.')[-1]
_from = u".".join(path.split('.')[:-1])
return getattr(__import__(_from, fromlist=[_import]), _import)
except:
return object
``` |
[
{
"content": "Here is the source code:\n```python\n\"\"\"\ntebdIsing2.py\nUse TEBD to compute ground state of transverse field Ising model.\n2014-09-15\n\"\"\"\n\nimport numpy as np\nfrom cmath import *\nfrom mpstest16 import *\nimport time\n\ndef main():\n test0()\n\ndef test0():\n (J,muB,N,t,nsteps,chi)... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n\"\"\"\ntebdIsing2.py\nUse TEBD to compute ground state of transverse field Ising model.\n2014-09-15\n\"\"\"\n\nimport numpy as np\nfrom cmath import *\nfrom mpstest16 import *\nimport time\n\ndef main():\n test0()\n\ndef test0():\n (J,muB... | ```python
"""
tebdIsing2.py
Use TEBD to compute ground state of transverse field Ising model.
2014-09-15
"""
import numpy as np
from cmath import *
from mpstest16 import *
import time
def main():
test0()
def test0():
(J,muB,N,t,nsteps,chi) = (0.1,1,3,0.1,100,5)
for N in xrange(3,11):
test2(J,muB,N,t,nsteps,chi)
def test1():
print(getU(1,1,1))
a = np.random.rand(3,3)
show(a,"a")
show(np.linalg.eig(a)[0],"eignenvalues of a")
ea = expm(a)
show(ea,"e^a")
show(np.linalg.eig(ea)[0],"eigenvalues of e^a")
show(np.log(np.linalg.eig(ea)[0]),"log of eigenvalues of e^a")
print(s(0),"I")
print(s(1),"X")
print(s(2),"Y")
print(s(3),"Z")
def test2(J,muB,N,t,nsteps,chi):
print("\nStarting TEBD for N = %d Ising model with parameters:"%N)
print(" (J,muB,N) = (%.4f,%.4f,%d)"%(J,muB,N))
print(" (t,nsteps,chi) = (%f,%d,%d)"%(t,nsteps,chi))
startTime = time.clock()
mps = tebdIsing(J,muB,N,t,nsteps,chi)
runTime = time.clock()-startTime
print("Simulation completed in %f seconds"%runTime)
gs = getStateOBC(mps)
gs = gs/np.linalg.norm(gs) # Normalize state
## show(gs,"Ground State")
## print("Ground state =")
## displayState(gs)
startTime = time.clock()
energyBF = getEnergyBruteForce(J,muB,N,gs).real
## print("Energy = %f, Energy per spin = %f"%(energyBF,energyBF/N))
## runTime = time.clock() - startTime
## print("Brute force energy computed in %f seconds"%runTime)
startTime = time.clock()
energy = getEnergy(J,muB,N,mps).real
print("Energy = %f, Energy per spin = %f"%(energy,energy/N))
runTime = time.clock() - startTime
print("MPO energy computed in %f seconds"%runTime)
def test3():
print(pairHamiltonianMPO())
def isingH(J,muB,N):
"""
Full matrix representation of ising model.
"""
pairs = np.zeros((2**N,2**N),dtype=complex)
for i in xrange(N-1):
pairs += pauli([1,1],[i,i+1],N)
fields = np.zeros((2**N,2**N),dtype=complex)
for i in xrange(N):
fields += pauli([3],[i],N)
return -J*pairs-muB*fields
def tebdIsing(J,muB,N,t,nsteps,chi):
    """
    Run TEBD algorithm on 1D N-spin transverse field Ising model.
    Uses open boundary conditions and imaginary time evolution.
    Parameters
    ----------
    J : float
        Pair-wise interaction energy.
    muB : float
        Magnetic energy in transverse B-field.
    N : int
        Number of spins.
    t : float
        Timestep of each iteration.
    nsteps : int
        Number of time evolution iterations simulated.
    chi : int
        Maximum bond dimension kept after each SVD truncation.
    Returns
    -------
    state : list
        MPS representation of the (approximate) ground state.
        (An earlier version also returned per-step energies; that list is
        currently commented out and NOT returned.)
    """
    # Initiate system with random MPS state.
    d = 2
    state = randomMPSOBC(N,chi,d)
    # Initilise list of energies at each iteration
    # Compute time evolution operators.
    U = getU(J,muB,t) # Acts on pairs of spins in middle
    Ub = getUb(J,muB,t) # Acts on only one boundary spin
    # Run iteration nstep times
    for step in xrange(nsteps):
        # First half evolution: pairs (0,1), (2,3), ... -- even bonds
        # Evolve first two spins
        state[0],state[1] = leftPair(state[0],state[1],U,chi,d)
        # Evolve middle spins
        for i in xrange(2,N-2,2):
            state[i],state[i+1] = middlePair(state[i],state[i+1],U,chi,d)
        # Evolve last spin pair (or single spin if odd)
        ## show(state[-1].shape,"state[-1]")
        if N % 2 and N > 2: # N is odd
            state[-1] = rightSingle(state[-1],Ub)
            ## print("odd")
        elif N > 2: # N is even
            state[-2],state[-1] = rightPair(state[-2],state[-1],U,chi,d)
        ## show(state[-1].shape,"state[-1]")
        # Second half evolution: pairs (1,2), (3,4), ... -- odd bonds
        # Evolve first spin
        state[0] = leftSingle(state[0],Ub)
        # Evolve middle spins
        for i in xrange(1,N-2,2):
            state[i],state[i+1] = middlePair(state[i],state[i+1],U,chi,d)
        ## show(state[-1].shape,"state[-1]")
        state[-1] = rightSingle(state[-1],Ub)
        ## show(state[-1].shape,"state[-1]")
        ## # Evolve last spin (or spin pair if odd)
        ## show(state[-1].shape,"state[-1]")
        if N % 2 and N > 2: # N is odd
            state[-2],state[-1] = rightPair(state[-2],state[-1],U,chi,d)
        elif N > 2: # N is even and greater than 2
            state[-1] = rightSingle(state[-1],Ub)
        ## energies.append(getEnergy(state))
    return state
def middlePair(A,B,U,chi,d):
    """
    Evolve a pair of spins in middle.

    A, B : bulk MPS site tensors, assumed (left_bond, d, right_bond) -- TODO confirm
    U    : two-site evolution operator of shape (d, d, d, d)
    chi  : maximum bond dimension kept in the SVD truncation
    d    : local physical dimension
    """
    lbd = A.shape[0] # Left bond dimension
    rbd = B.shape[2] # Right bond dimension
    # Contract A with U, then with B, forming the two-site wavefunction theta
    theta = np.tensordot(A,U,axes=(1,2))
    theta = np.tensordot(theta,B,axes=((1,4),(0,1)))
    theta = np.reshape(theta,(lbd*d,rbd*d))
    # Split back into two site tensors, truncating the new bond to chi
    (a,b) = efficientSVD(theta,chi)
    a = np.reshape(a,(lbd,d,a.shape[1]))
    b = np.reshape(b,(b.shape[0],d,rbd))
    return (a,b)
def leftPair(A,B,U,chi,d):
    """
    Evolve a pair of spins on left.

    A is the boundary tensor (no left bond, assumed shape (d, right_bond));
    B is the neighbouring bulk tensor -- TODO confirm shapes against randomMPSOBC.
    """
    rbd = B.shape[2] # Right bond dimension
    # Contract boundary tensor A with U, then with B
    theta = np.tensordot(A,U,axes=(0,2))
    theta = np.tensordot(theta,B,axes=((0,3),(0,1)))
    theta = np.reshape(theta,(d,d*rbd))
    # SVD-split, truncating the new bond to chi
    (a,b) = efficientSVD(theta,chi)
    b = np.reshape(b,(b.shape[0],d,rbd))
    return (a,b)
def rightPair(A,B,U,chi,d):
    """
    Evolve a pair of spins on right.

    A is a bulk tensor; B is the right boundary tensor (no right bond,
    assumed shape (left_bond, d)) -- TODO confirm shapes.
    """
    lbd = A.shape[0] # Left bond dimension
    ## show(A.shape,"A")
    ## show(B.shape,"B")
    ## show(U.shape,"U")
    theta = np.tensordot(A,U,axes=(1,2))
    ## show(theta.shape,"A*U")
    theta = np.tensordot(theta,B,axes=((1,4),(0,1)))
    ## show(theta.shape,"A*U*B")
    theta = np.reshape(theta,(lbd*d,d))
    # SVD-split, truncating the new bond to chi
    (a,b) = efficientSVD(theta,chi)
    a = np.reshape(a,(lbd,d,a.shape[1]))
    return (a,b)
def leftSingle(A,Ub):
    """
    Evolve a single spin on left end.

    Applies the boundary operator Ub to the physical index (axis 0) of the
    left boundary tensor A.
    """
    evolved = np.tensordot(Ub,A,axes=(1,0))
    return evolved
def rightSingle(A,Ub):
    """
    Evolve a single spin on right end.

    Contracts the physical index (axis 1) of the right boundary tensor A
    with axis 1 of the boundary operator Ub.
    """
    evolved = np.tensordot(A,Ub,axes=(1,1))
    return evolved
def pairHamiltonianMPO():
    """Split the two-site X(x)X coupling into a left/right MPO tensor pair via SVD.

    Returns (a, b) with shapes (2,2,4) and (4,2,2); contracting them over the
    shared bond reconstructs kron(X, X).
    """
    XX = np.kron(s(1),s(1))
    (a,b) = efficientSVD(XX,10)
    (a,b) = (np.reshape(a,(2,2,4)),np.reshape(b,(4,2,2)))
    ## print(np.reshape(np.tensordot(a,b,(-1,0)),(4,4)))
    return (a,b)
def getEnergy(J,muB,N,mps):
    """Normalized energy <psi|H|psi>/<psi|psi> from MPO expectation values.

    N is accepted for signature symmetry with getEnergyBruteForce but unused.
    """
    energy = (-J*localEnergy(mps)-muB*fieldEnergy(mps))/\
             innerProductOBC(mps,mps)
    return energy
def localEnergy(mps):
    """
    Energy of local interactions.

    Sums <psi| X_i X_{i+1} |psi> over all nearest-neighbour bonds by building,
    for each bond, an MPO that is identity everywhere except X at sites i, i+1.
    Boundary MPO tensors are reshaped to drop the dummy outer bond.
    """
    N = len(mps)
    energy = 0
    I = np.reshape(s(0),(1,2,2,1))
    X = np.reshape(s(1),(1,2,2,1))
    for i in xrange(0,N-1):
        hamiltonian = [I for x in xrange(N)]
        ## show((hamiltonian),"Hamiltonian MPO")
        ## show(N,"N")
        ## show(i,"i")
        ## show(hamiltonian[i],"hamiltonian[i]")
        ## show(hamiltonian[i+1],"hamiltonian[i+1]")
        hamiltonian[i] = X
        hamiltonian[i+1] = X
        hamiltonian[0] = np.reshape(hamiltonian[0],(2,2,1))
        hamiltonian[-1] = np.reshape(hamiltonian[-1],(1,2,2))
        energy += operatorInnerOBC(mps,hamiltonian,mps)
    return energy
def fieldEnergy(mps):
    """
    Energy of spins in magnetic field.

    Sums <psi| Z_i |psi> over all sites using one single-site-Z MPO per site,
    mirroring the construction in localEnergy.
    """
    N = len(mps)
    energy = 0
    I = np.reshape(s(0),(1,2,2,1))
    Z = np.reshape(s(3),(1,2,2,1))
    for i in xrange(N):
        hamiltonian = [I for x in xrange(N)]
        hamiltonian[i] = Z
        hamiltonian[0] = np.reshape(hamiltonian[0],(2,2,1))
        hamiltonian[-1] = np.reshape(hamiltonian[-1],(1,2,2))
        energy += operatorInnerOBC(mps,hamiltonian,mps)
    return energy
def getEnergyBruteForce(J,muB,N,state):
    """
    Energy of state by brute force with 2**N by 2**N Hamiltonian matrix.
    E = <a|H|a>.  Assumes *state* is already normalized.
    Parameters
    ----------
    state : (2**N,) ndarray
        State vector of system.
    Returns
    -------
    energy : complex
        Energy of the system.
    """
    return np.dot(np.conj(state),np.dot(isingH(J,muB,N),state))
def getUb(J,muB,t):
    """
    Time evolution operators acting on boundaries.
    Parameters
    ----------
    J : float
        Pair-wise interaction energy.  (Unused here: the boundary operator
        contains only the single-site field half-step.)
    muB : float
        Magnetic energy of each spin with dipole moment mu in field B.
    t : float
        Timestep of each iteration.
    Returns
    -------
    startU : (2,2) ndarray
        Non-unitary evolution operator acting on single qubit at boundary.
        NOTE(review): uses s(1) = X for the field, matching getU's field term
        but NOT isingH/fieldEnergy (which use Z) -- confirm intended basis.
    """
    return expm(-muB/2*s(1)*t)
def getU(J,muB,t):
    """
    Time evolution operator acting on 2 spins.
    Parameters
    ----------
    J : float
        Pair-wise interaction energy.
    muB : float
        Magnetic energy of each spin with dipole moment mu in field B.
    t : float
        Timestep of each iteration.
    Returns
    -------
    U : (2,2,2,2) ndarray
        Non-unitary time evolution operator (imaginary-time step exp(-H t)).
    NOTE(review): the pair term here is Z(x)Z and the field term X, whereas
    isingH/localEnergy build X(x)X pairs with a Z field.  The two conventions
    are related by a basis rotation but are mutually inconsistent as written;
    confirm which basis is intended before trusting the measured energies.
    """
    hamiltonian = -J*np.kron(s(3),s(3))-\
                  (np.kron(s(1),s(0))+np.kron(s(0),s(1)))*muB/2
    U = expm(-hamiltonian*t)
    return np.reshape(U,(2,2,2,2))
def s(i):
    """
    The Pauli matrices I, X, Y, Z.

    s(0) = I, s(1) = X, s(2) = Y, s(3) = Z.
    (The previous docstring incorrectly said ``s(4) = Z``; index 3 is Z.)

    Parameters
    ----------
    i : int
        Index of Pauli matrix, one of 0, 1, 2, 3.

    Returns
    -------
    s : (2,2) ndarray
        Pauli matrix with complex elements.

    Raises
    ------
    ValueError
        If ``i`` is not 0, 1, 2 or 3.  (Previously such calls fell through
        and silently returned None, deferring the failure to the caller.)
    """
    if i == 0:
        return np.eye(2,dtype=complex)
    elif i == 1:
        return np.array([[0,1],[1,0]],dtype=complex)
    elif i == 2:
        return np.array([[0,-1j],[1j,0]],dtype=complex)
    elif i == 3:
        return np.array([[1,0],[0,-1]],dtype=complex)
    raise ValueError("Pauli index must be 0, 1, 2 or 3, got %r" % (i,))
def pauli(paulis,positions,N):
    """
    Build a 2**N x 2**N operator that applies the given Pauli matrices at
    the listed sites and the identity everywhere else.

    paulis    : list of Pauli indices (see s()), parallel to *positions*
    positions : list of site indices the operators act on
    N         : total number of sites
    """
    out = 1+0j
    for site in xrange(N):
        if site in positions:
            factor = s(paulis[positions.index(site)])
        else:
            factor = s(0)
        out = np.kron(out,factor)
    return out
def expm(A):
    """
    Matrix exponential by eigen-decomposition.

    Diagonalizes A as V diag(w) V^-1 and exponentiates the eigenvalues,
    so A must be diagonalizable.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix to be exponentiated

    Returns
    -------
    expm : (N, N) ndarray
        Matrix exponential of A
    """
    eigvals, eigvecs = np.linalg.eig(A)
    inv_eigvecs = np.linalg.inv(eigvecs)
    # Scaling the columns of V by exp(w) is V . diag(exp(w))
    return np.dot(eigvecs*np.exp(eigvals),inv_eigvecs)
def displayState(state):
    """Pretty-print a state vector as amplitude * exp(phase-degrees) terms in
    the computational basis, two terms per line.  Print-only; returns None."""
    display = ""
    N = int(np.log2(state.size))
    for i in xrange(state.size):
        display += " + %.4f*exp(%d"%(abs(state[i]),np.degrees(phase(state[i])))
        display += u'\u00b0' + "i)|" + format(i,"0"+str(N)+"b") + ">"
        # newline after every second term
        if i % 2:
            display += "\n"
    print(display[:-1])
# Allow running this module as a script.
if __name__ == "__main__":
    main()
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# -*- coding: utf-8 -*-\n\nimport os\nimport pygame\nimport random\nfrom math import pi, cos, acos, sin, sqrt\n\nimport classes.board\nimport classes.extras as ex\nimport classes.game_driver as gd\nimport classes.level_con... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\nimport os\nimport pygame\nimport random\nfrom math import pi, cos, acos, sin, sqrt\n\nimport classes.board\nimport classes.extras as ex\nimport classes.game_driver as gd\nimport c... | ```python
# -*- coding: utf-8 -*-
import os
import pygame
import random
from math import pi, cos, acos, sin, sqrt
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
import classes.simple_vector as sv
class Board(gd.BoardGame):
    def __init__(self, mainloop, speaker, config, screen_w, screen_h):
        # Single level/sublevel activity on a 19 x 10 board grid.
        self.level = lc.Level(self, mainloop, 1, 1)
        gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 19, 10)
    def create_game_objects(self, level=1):
        """Build the clock face, hour/minute hands, time labels and the four
        +/- hour/minute controls, then paint the initial clock canvas."""
        self.vis_buttons = [0, 0, 0, 0, 1, 0, 1, 0, 0]
        self.mainloop.info.hide_buttonsa(self.vis_buttons)
        # hand_id: 0 = none grabbed, 1 = hour hand, 2 = minute hand
        self.hand_id = 0
        self.hand_coords = [[], []]
        self.board.draw_grid = False
        # Pick colors from the accessibility scheme if one is active,
        # otherwise derive defaults from HSV hues 225 (hour) / 170 (minute).
        if self.mainloop.scheme is not None:
            color1 = self.mainloop.scheme.color1  # bright side of short hand
            color3 = self.mainloop.scheme.color3  # inner font color
            color5 = self.mainloop.scheme.color5  # dark side of short hand
            color7 = self.mainloop.scheme.color7  # inner circle filling
            color2 = self.mainloop.scheme.color2  # bright side of long hand
            color4 = self.mainloop.scheme.color4  # ex.hsv_to_rgb(170,255,255)#outer font color
            color6 = self.mainloop.scheme.color6  # dark side of long hand
            color8 = self.mainloop.scheme.color8  # outer circle filling
            self.h_col = color5
            self.m_col = color6
            white = self.mainloop.scheme.u_color
            gray = (100, 100, 100)
        else:
            color1 = ex.hsv_to_rgb(225, 70, 230)
            color3 = ex.hsv_to_rgb(225, 255, 255)
            color5 = ex.hsv_to_rgb(225, 180, 240)
            color7 = ex.hsv_to_rgb(225, 10, 255)
            color2 = ex.hsv_to_rgb(170, 70, 230)
            color4 = ex.hsv_to_rgb(170, 255, 255)
            color6 = ex.hsv_to_rgb(170, 180, 240)
            color8 = ex.hsv_to_rgb(170, 10, 255)
            self.h_col = ex.hsv_to_rgb(225, 190, 220)
            self.m_col = ex.hsv_to_rgb(170, 190, 220)
            white = (255, 255, 255)
            gray = (100, 100, 100)
        transp = (0, 0, 0, 0)
        self.colors = [color1, color2]
        self.colors2 = [color3, color4]
        self.colors3 = [color5, color6]
        self.colors4 = [color7, color8]
        data = [19, 10, True, True, False, False, False, False, False, True, True, 15]
        h_pool = range(1, 13)
        m_pool = range(0, 60)
        # visual display properties (unpacked from the data flags above)
        self.show_outer_ring = data[2]
        self.show_minutes = data[3]
        self.show_24h = data[4]
        self.show_only_quarters_h = data[5]
        self.show_only_quarters_m = data[6]
        self.show_only_fives_m = data[7]
        self.show_roman = data[8]
        self.show_highlight = data[9]
        self.show_hour_offset = data[10]
        self.show_catalan = True
        # Random starting time; self.tm is the working copy edited by the user.
        tt = [random.choice(h_pool), random.choice(m_pool)]
        self.time = tt
        self.tm = self.time[:]
        self.digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
        self.roman = ["I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X", "XI", "XII"]
        if self.mainloop.lang.lang == "ca":
            self.catalan = ["i5", "-5", "1/4", "i5", "-5", "2/4", "i5", "-5", "3/4", "i5", "-5", "12"]
        else:
            self.catalan = ["5", "10", "1/4", "20", "25", "1/2", "25", "20", "1/4", "10", "5", "0"]
        x_count = self.get_x_count(data[1], even=False)
        if x_count > data[0]:
            data[0] = x_count
        self.font_size = 0
        self.data = data
        self.layout.update_layout(data[0], data[1])
        self.board.level_start(data[0], data[1], self.layout.scale)
        self.size = self.board.scale * 10
        # Main 10x10 canvas ship that the clock is painted on
        self.board.add_unit(0, 0, 10, 10, classes.board.Ship, "", white, "", self.font_size)
        self.clock_canvas = self.board.ships[-1]
        self.board.active_ship = self.clock_canvas.unit_id
        ans_offset = 10 + (data[0] - 15) // 2
        self.board.add_unit(10, 0, data[0] - 10, 2, classes.board.Label, self.lang.d["Set_clock_instr"], white, "", 2)
        self.board.units[-1].font_color = gray
        # Up arrows: +1 hour / +1 minute
        self.board.add_unit(ans_offset + 1, 3, 1, 1, classes.board.ImgCenteredShip, "", transp,
                            img_src='nav_u_mts.png', alpha=True)
        self.board.ships[-1].set_tint_color(self.h_col)
        self.h_plus = self.board.ships[-1]
        self.board.add_unit(ans_offset + 3, 3, 1, 1, classes.board.ImgCenteredShip, "", transp,
                            img_src='nav_u_mts.png', alpha=True)
        self.board.ships[-1].set_tint_color(self.m_col)
        self.m_plus = self.board.ships[-1]
        # Down arrows: -1 hour / -1 minute
        self.board.add_unit(ans_offset + 1, 5, 1, 1, classes.board.ImgCenteredShip, "", transp,
                            img_src='nav_d_mts.png', alpha=True)
        self.board.ships[-1].set_tint_color(self.h_col)
        self.h_min = self.board.ships[-1]
        self.board.add_unit(ans_offset + 3, 5, 1, 1, classes.board.ImgCenteredShip, "", transp,
                            img_src='nav_d_mts.png', alpha=True)
        self.board.ships[-1].set_tint_color(self.m_col)
        self.m_min = self.board.ships[-1]
        lst = [self.h_plus, self.h_min, self.m_plus, self.m_min]
        for each in lst:
            each.immobilize()
        # Digital HH:MM readout between the arrows
        self.board.add_unit(ans_offset+1, 4, 1, 1, classes.board.Label, "%02d" % self.time[0], white, "", 0)
        self.ans_h = self.board.units[-1]
        self.board.add_unit(ans_offset + 2, 4, 1, 1, classes.board.Label, ":", white, "", 0)
        self.board.add_unit(ans_offset + 3, 4, 1, 1, classes.board.Label, "%02d" % self.time[1], white, "", 0)
        self.ans_m = self.board.units[-1]
        self.ans_h.immobilize()
        self.ans_m.immobilize()
        self.ans_h.font_color = color3
        self.ans_m.font_color = color4
        self.center = [self.size // 2, self.size // 2]
        self.clock_canvas.font = self.clock_canvas.board.font_sizes[2]
        self.clock_canvas.font2 = self.clock_canvas.board.font_sizes[7]
        self.clock_canvas.font3 = self.clock_canvas.board.font_sizes[26]
        self.clock_canvas.immobilize()
        # Text label spelling the current time out in words
        self.board.add_unit(10, 6, data[0] - 10, 2, classes.board.Letter, "", white, "", 2)
        self.text_time = self.board.ships[-1]
        self.text_time.immobilize()
        self.text_time.font_color = gray
        self.update_text_time()
        self.canvas = pygame.Surface((self.size, self.size - 1))
        if self.mainloop.scheme is not None:
            self.canvas.fill(self.mainloop.scheme.u_color)
        else:
            self.canvas.fill((255, 255, 255))
        tint_h = self.colors3[0]
        tint_m = self.colors3[1]
        # Hand images are scaled to `shrink` of the canvas and tinted
        shrink = 0.72
        self.whs = int(self.size * shrink)
        self.hand_h = self.scalled_img(
            pygame.image.load(os.path.join('res', 'images', "clock_h.png")).convert_alpha(), self.whs, self.whs)
        self.hand_h.fill(tint_h, special_flags=pygame.BLEND_ADD)
        self.hand_m = self.scalled_img(
            pygame.image.load(os.path.join('res', 'images', "clock_m.png")).convert_alpha(), self.whs, self.whs)
        self.hand_m.fill(tint_m, special_flags=pygame.BLEND_ADD)
        self.pivot = [self.whs // 2, self.whs // 2]
        self.hands = [self.hand_h, self.hand_m]
        self.hands_vars()
        self.draw_hands()
        self.clock_canvas.hidden_value = [2, 3]
        self.clock_canvas.font_color = color2
        self.clock_canvas.painting = self.canvas.copy()
    def update_text_time(self):
        """Refresh the written-out time label and the HH:MM digital readout
        from self.time, with per-language speaker-text overrides."""
        # Catalan uses "un" in running text but "u" elsewhere; swap temporarily.
        if self.mainloop.lang.lang == "ca":
            self.lang.numbers[0] = "un"
        tt = self.time
        if self.mainloop.m.game_variant in [0, 2]:
            if self.mainloop.m.game_var2 == 0:
                self.text_string = self.lang.time2str(tt[0], tt[1])
                if self.lang.lang == "ru":
                    spk_txt = self.lang.time2spk(tt[0], tt[1])
                    self.text_time.speaker_val = spk_txt
                    self.text_time.speaker_val_update = False
            else:
                self.text_string = self.lang.time2str_short(tt[0], tt[1])
                if self.lang.lang == "ru":
                    spk_txt = self.lang.time2spk_short(tt[0], tt[1])
                    self.text_time.speaker_val = spk_txt
                    self.text_time.speaker_val_update = False
            if self.lang.lang == "he":
                spk_txt = self.lang.time2spk(tt[0], tt[1])
                self.text_time.speaker_val = spk_txt
                self.text_time.speaker_val_update = False
            self.text_time.value = self.text_string
            self.text_time.update_me = True
        # Digital readout always shows zero-padded hour and minute.
        self.ans_h.value = "%02d" % self.time[0]
        self.ans_m.value = "%02d" % self.time[1]
        self.ans_h.update_me = True
        self.ans_m.update_me = True
        if self.mainloop.lang.lang == "ca":
            self.lang.numbers[0] = "u"
    def hands_vars(self):
        """Precompute angular steps and the radii used for rings, numbers and hands."""
        self.angle_step_12 = 2 * pi / 12
        self.angle_step_60 = 2 * pi / 60
        # Angle 0 points right; start at -pi/2 so index 0 is at 12 o'clock.
        self.angle_start = -pi / 2
        self.r = self.size // 3 + self.size // 10
        self.rs = [self.r * 0.6, self.r * 0.85, self.r * 0.6]  # rings
        self.rs2 = [self.r * 0.85, self.r * 0.6, self.r * 0.34]  # numbers
        self.rs3 = [self.r * 0.45, self.r * 0.7, self.r * 0.6]  # hands
    def draw_hands(self):
        """Redraw the whole clock face onto self.canvas: rings, minute/hour
        numerals (with optional highlight), Catalan minute words, 24h ring,
        and the rotated hand images; also caches the hand hit-test polygons."""
        # Hour-hand angle optionally advances proportionally with the minutes.
        if self.show_hour_offset:
            a1 = self.angle_start + (2 * pi / 12) * self.time[0] + (self.angle_step_12 * (2 * pi / 60) * self.time[
                1]) / (2 * pi)
        else:
            a1 = self.angle_start + (2 * pi / 12) * self.time[0]
        a2 = self.angle_start + (2 * pi / 60) * self.time[1]
        self.angles = [a1, a2]
        rs = self.rs
        rs2 = self.rs2
        rs3 = self.rs3
        time = self.time
        # Concentric filled circles: outer (minute) ring then inner (hour) face.
        if self.show_outer_ring:
            pygame.draw.circle(self.canvas, self.colors4[1], self.center, int(rs[1] + 10), 0)
            pygame.draw.circle(self.canvas, self.colors2[1], self.center, int(rs[1] + 10), 1)
        pygame.draw.circle(self.canvas, self.colors4[0], self.center, int(rs[2] + 10), 0)
        pygame.draw.circle(self.canvas, self.colors2[0], self.center, int(rs[2] + 10), 1)
        # Minute marks and numerals around the outer ring.
        if self.show_outer_ring:
            for i in range(60):
                val = str(i + 1)
                if self.show_only_quarters_m:
                    if (i + 1) % 15 != 0:
                        val = ""
                elif self.show_only_fives_m:
                    if (i + 1) % 5 != 0:
                        val = ""
                if i == 59:
                    val = "0"
                a = self.angle_start + self.angle_step_60 * (i + 1)
                if self.show_minutes:
                    font_size = self.clock_canvas.font3.size(val)
                    # Highlight the numeral matching the current minute.
                    if not self.show_highlight or (i + 1 == time[1] or (time[1] == 0 and i == 59)):
                        text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors2[1])
                    else:
                        text = self.clock_canvas.font3.render("%s" % (val), 1, self.colors[1])
                    x3 = (rs[1] + 15 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - \
                         font_size[0] / 2
                    y3 = (rs[1] + 15 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - \
                         font_size[1] / 2
                    self.canvas.blit(text, (x3, y3))
                    if self.show_only_quarters_m or self.show_only_fives_m:
                        if (i + 1) % 15 == 0:
                            marklen = 20 * self.layout.scale / 34
                        elif (i + 1) % 5 == 0:
                            marklen = 15 * self.layout.scale / 34
                        else:
                            marklen = 10 * self.layout.scale / 34
                    else:
                        marklen = 15 * self.layout.scale / 34
                else:
                    if (i + 1) % 15 == 0:
                        marklen = 20 * self.layout.scale / 34
                    elif (i + 1) % 5 == 0:
                        marklen = 15 * self.layout.scale / 34
                    else:
                        marklen = 10 * self.layout.scale / 34
                if self.show_outer_ring:
                    x1 = (rs[1] + 10) * cos(a) + self.center[0]
                    y1 = (rs[1] + 10) * sin(a) + self.center[1]
                    x2 = (rs[1] + marklen) * cos(a) + self.center[0]
                    y2 = (rs[1] + marklen) * sin(a) + self.center[1]
                    pygame.draw.aaline(self.canvas, self.colors2[1], [x1, y1], [x2, y2])
        # Hour marks, hour numerals (Arabic or Roman) and Catalan minute words.
        for i in range(12):
            val = str(i + 1)
            if self.show_only_quarters_h:
                if (i + 1) % 3 != 0:
                    val = ""
            a = self.angle_start + self.angle_step_12 * (i + 1)
            x1 = (rs[2] - 3) * cos(a) + self.center[0]
            y1 = (rs[2] - 3) * sin(a) + self.center[1]
            x2 = (rs[2] + 10) * cos(a) + self.center[0]
            y2 = (rs[2] + 10) * sin(a) + self.center[1]
            pygame.draw.aaline(self.canvas, self.colors2[0], [x1, y1], [x2, y2])
            if self.show_roman:
                val = self.hour_to_roman(val)
            if not self.show_highlight or i + 1 == time[0]:
                text = self.clock_canvas.font.render("%s" % (val), 1, self.colors2[0])
            else:
                text = self.clock_canvas.font.render("%s" % (val), 1, self.colors[0])
            font_size = self.clock_canvas.font.size(val)
            if self.show_catalan:
                val2 = self.catalan[i]
                if (i + 1) * 5 == time[1]:
                    text2 = self.clock_canvas.font.render("%s" % (val2), 1, self.colors2[1])
                else:
                    text2 = self.clock_canvas.font.render("%s" % (val2), 1, self.colors[1])
                font_size2 = self.clock_canvas.font.size(val2)
            if self.show_roman:
                # Roman numerals are rotated to lie along the radius.
                text_angle = -(360 / 12.0) * (i + 1)
                text = pygame.transform.rotate(text, text_angle)
                rect = text.get_rect()
                x3 = (rs2[2] + 20 + font_size[1] // 2) * cos(a) + self.center[0] - rect.width / 2
                y3 = (rs2[2] + 20 + font_size[1] // 2) * sin(a) + self.center[1] - rect.height / 2
            else:
                x3 = int(
                    (rs2[2] + 10 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - font_size[
                        0] / 2)
                y3 = int(
                    (rs2[2] + 10 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - font_size[
                        1] / 2)
            xc = int(
                (rs[2] + 15 * self.layout.scale / 34 + font_size[1] // 2) * cos(a) + self.center[0] - font_size2[
                    0] / 2)
            yc = int(
                (rs[2] + 15 * self.layout.scale / 34 + font_size[1] // 2) * sin(a) + self.center[1] - font_size2[
                    1] / 2)
            self.canvas.blit(text, (x3, y3))
            self.canvas.blit(text2, (xc, yc))
            if self.show_24h:
                if i + 13 == 24:
                    val = "0"
                    v = 0
                else:
                    val = str(i + 13)
                    v = i + 13
                font_size = self.clock_canvas.font2.size(val)
                if not self.show_highlight or v == time[0]:
                    text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors2[0])
                else:
                    text = self.clock_canvas.font2.render("%s" % (val), 1, self.colors[0])
                x3 = (rs2[0] + font_size[1] // 2) * cos(a) + self.center[0] - font_size[0] / 2
                y3 = (rs2[0] + font_size[1] // 2) * sin(a) + self.center[1] - font_size[1] / 2
                self.canvas.blit(text, (x3, y3))
        # Cache a 4-point polygon per hand (used by is_contained for hit tests).
        hand_width = [self.r // 14, self.r // 18]
        start_offset = [self.size // 18, self.size // 22]
        for i in range(2):
            # angle for line
            angle = self.angles[i]  # angle_start + angle_step*i
            x0 = self.center[0] - start_offset[i] * cos(angle)
            y0 = self.center[1] - start_offset[i] * sin(angle)
            # Calculate the x,y for the end point
            x1 = rs3[i] * cos(angle) + self.center[0]
            y1 = rs3[i] * sin(angle) + self.center[1]
            x2 = hand_width[i] * cos(angle - pi / 2) + self.center[0]
            y2 = hand_width[i] * sin(angle - pi / 2) + self.center[1]
            x3 = hand_width[i] * cos(angle + pi / 2) + self.center[0]
            y3 = hand_width[i] * sin(angle + pi / 2) + self.center[1]
            points = [[x0, y0], [x2, y2], [x1, y1], [x3, y3]]
            self.hand_coords[i] = points
        self.clock_canvas.update_me = True
        # Blit the rotated hand images centered on the face.
        for i in range(0, 2):
            angle = 360 - ((self.angles[i] + pi / 2) * 180 / pi)
            img = self.rotatePivoted(self.hands[i], angle, self.pivot)
            self.canvas.blit(img[0], ((self.size - self.whs) // 2 + img[1][0], (self.size - self.whs) // 2 + img[1][1]))
        self.update_text_time()
        self.clock_canvas.update_me = True
        self.mainloop.redraw_needed[0] = True
def scalled_img(self, image, new_w, new_h):
'scales image depending on pygame version and bit depth using either smoothscale or scale'
if image.get_bitsize() in [32, 24] and pygame.version.vernum >= (1, 8):
img = pygame.transform.smoothscale(image, (new_w, new_h))
else:
img = pygame.transform.scale(image, (new_w, new_h))
return img
def rotatePivoted(self, img, angle, pivot):
image = pygame.transform.rotate(img, angle)
rect = image.get_rect()
rect.center = pivot
return image, rect
def hour_to_roman(self, val):
val = int(val)
return self.roman[val - 1]
def vector_len(self, v):
return sqrt(v[0] ** 2 + v[1] ** 2)
def scalar_product(self, v1, v2):
return sum([v1[i] * v2[i] for i in range(len(v1))])
    def angle(self, v1, v2):
        # Despite the name, returns the COSINE of the angle between v1 and v2,
        # not the angle itself (is_contained compares these cosine values).
        return self.scalar_product(v1, v2) / (self.vector_len(v1) * self.vector_len(v2))
    def is_contained(self, pos, coords_id=0):
        """Return True if *pos* lies inside the hand polygon cached by
        draw_hands (coords_id 0 = hour hand, 1 = minute hand).

        Works by comparing angle sums: the point is inside iff the angles it
        subtends at two opposite corners do not exceed those corners' angles.
        Values compared are (1 - cosine) terms from self.angle().
        """
        v0 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][1])
        v1 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][1])
        v2 = sv.Vector2.from_points(self.hand_coords[coords_id][2], self.hand_coords[coords_id][3])
        v3 = sv.Vector2.from_points(self.hand_coords[coords_id][0], self.hand_coords[coords_id][3])
        v4 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][1])
        v5 = sv.Vector2.from_points(pos, self.hand_coords[coords_id][3])
        a1 = 1 - self.angle(v0, v1)  # corner 1
        a2 = 1 - self.angle(v2, v3)  # corner 2
        a3 = 1 - self.angle(v0, v4)  # point to arm1 of corner1
        a4 = 1 - self.angle(v1, v4)  # point to arm2 of corner1
        a5 = 1 - self.angle(v2, v5)  # point to arm1 of corner2
        a6 = 1 - self.angle(v3, v5)  # point to arm2 of corner2
        if (a3 + a4) < a1 and (a5 + a6) < a2:
            return True
        return False
    def current_angle(self, pos, r):
        """Angle of *pos* around the clock center, measured clockwise from
        12 o'clock in radians [0, 2*pi), for a point at distance *r*.

        The four branches cover the four quadrants via the signs of the
        direction cosines; r must be the true distance so cosa/sina stay
        within [-1, 1].
        """
        cosa = (pos[0] - self.center[0]) / r
        sina = (pos[1] - self.center[1]) / r
        if 0 <= cosa <= 1 and -1 <= sina <= 0:
            angle = pi / 2 - acos(cosa)
        elif 0 <= cosa <= 1 and 0 <= sina <= 1:
            angle = acos(cosa) + pi / 2  # ok
        elif -1 <= cosa <= 0 and 0 <= sina <= 1:
            angle = acos(cosa) + pi / 2  # ok
        elif -1 <= cosa <= 0 and -1 <= sina <= 0:
            angle = 2 * pi + pi / 2 - acos(cosa)
        return angle
    def handle(self, event):
        """Route mouse events: drag a grabbed hand, click the face to set a
        hand directly, or click the +/- arrow ships; commit time changes by
        redrawing the clock."""
        gd.BoardGame.handle(self, event)
        self.tm = self.time[:]
        # Dragging with a hand grabbed (hand_id 1 = hour, 2 = minute).
        if event.type == pygame.MOUSEMOTION and self.hand_id > 0:
            pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]
            r = self.vector_len([pos[0] - self.center[0], pos[1] - self.center[1]])
            if r == 0:
                r = 0.1  # avoid division by zero at the exact center
            if self.hand_id == 1:
                h = (self.current_angle(pos, r)) / self.angle_step_12
                if int(h) == 0:
                    self.tm[0] = 12
                else:
                    self.tm[0] = int(h)
            elif self.hand_id == 2:
                m = (self.current_angle(pos, r)) / self.angle_step_60
                self.tm[1] = int(m)
                # Crossing 12 with the minute hand carries the hour over.
                if 0 <= self.tm[1] < 5 and 55 <= self.time[1] <= 59:
                    if self.tm[0] == 12:
                        self.tm[0] = 1
                    else:
                        self.tm[0] += 1
                elif 0 <= self.time[1] < 5 and 55 <= self.tm[1] <= 59:
                    if self.tm[0] == 1:
                        self.tm[0] = 12
                    else:
                        self.tm[0] -= 1
        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            active = self.board.active_ship
            pos = [event.pos[0] - self.layout.game_left, event.pos[1] - self.layout.top_margin]
            # active 0 = the clock canvas itself; 1-4 = the +/- arrow ships.
            if active == 0:
                r = self.vector_len([pos[0] - self.center[0], pos[1] - self.center[1]])
                if r == 0: r = 0.1
                self.hand_id = 0
                if self.is_contained(pos, coords_id=0):
                    self.hand_id = 1
                elif self.is_contained(pos, coords_id=1):
                    self.hand_id = 2
                elif self.rs[0] * 1.1 > r:
                    # Click inside the inner ring sets the hour hand directly.
                    h = (self.current_angle(pos, r)) / self.angle_step_12
                    if int(h) == 0:
                        h = 12
                    self.tm[0] = int(h)
                else:
                    # Click outside sets the minute hand.
                    m = (self.current_angle(pos, r)) / self.angle_step_60
                    self.tm[1] = int(m)
            elif active == 1:
                self.change_time_btn(1, 0)
            elif active == 2:
                self.change_time_btn(0, 1)
            elif active == 3:
                self.change_time_btn(-1, 0)
            elif active == 4:
                self.change_time_btn(0, -1)
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
            self.hand_id = 0
        # Commit and repaint only when the working time actually changed.
        if self.tm != self.time:
            self.time = self.tm[:]
            self.draw_hands()
            self.clock_canvas.painting = self.canvas.copy()
    def change_time_btn(self, h, m):
        """Adjust the working time self.tm by *h* hours and *m* minutes,
        wrapping hours within 1-12 and minutes within 0-59 (minute wrap
        recursively carries into the hour)."""
        if h != 0:
            if h == 1 and self.tm[0] == 12:
                self.tm[0] = 1
            elif h == -1 and self.tm[0] == 1:
                self.tm[0] = 12
            elif h > 1 and self.tm[0] > 12 - h:
                self.tm[0] = (self.tm[0] + h) - 12
            else:
                self.tm[0] += h
        if m != 0:
            if m == 1 and self.tm[1] == 59:
                self.tm[1] = 0
                self.change_time_btn(1, 0)  # carry over into the hour
            elif m == -1 and self.tm[1] == 0:
                self.tm[1] = 59
                self.change_time_btn(-1, 0)  # borrow from the hour
            elif m > 1 and self.tm[1] > 59 - m:
                self.change_time_btn(1, 0)
                self.tm[1] = (self.tm[1] + m) - 60
            else:
                self.tm[1] += m
    def update(self, game):
        # Clear the frame to white, then let the base class draw the board.
        game.fill((255, 255, 255))
        gd.BoardGame.update(self, game)
    def check_result(self):
        # Free-play activity: there is no answer to check.
        pass
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#!/usr/bin/env python\n\n\"\"\"\n@package mi.dataset.parser.test.test_dosta_abcdjm_cspp\n@file marine-integrations/mi/dataset/parser/test/test_dosta_abcdjm_cspp.py\n@author Mark Worden\n@brief Test code for a dosta_abcdjm_cspp data... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"\n@package mi.dataset.parser.test.test_dosta_abcdjm_cspp\n@file marine-integrations/mi/dataset/parser/test/test_dosta_abcdjm_cspp.py\n@author Mark Worden\n@brief Test code for a dosta_... | ```python
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_dosta_abcdjm_cspp
@file marine-integrations/mi/dataset/parser/test/test_dosta_abcdjm_cspp.py
@author Mark Worden
@brief Test code for a dosta_abcdjm_cspp data parser
"""
import os
import numpy
import yaml
import copy
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.driver.dosta_abcdjm.cspp.driver import DataTypeKey
from mi.dataset.parser.cspp_base import StateKey, METADATA_PARTICLE_CLASS_KEY, DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.dosta_abcdjm_cspp import DostaAbcdjmCsppParser
from mi.dataset.parser.dosta_abcdjm_cspp import DostaAbcdjmCsppMetadataRecoveredDataParticle, \
DostaAbcdjmCsppInstrumentRecoveredDataParticle, DostaAbcdjmCsppMetadataTelemeteredDataParticle, \
DostaAbcdjmCsppInstrumentTelemeteredDataParticle
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver', 'dosta_abcdjm', 'cspp', 'resource')
@attr('UNIT', group='mi')
class DostaAbcdjmCsppParserUnitTestCase(ParserUnitTestCase):
"""
dosta_abcdjm_cspp Parser unit test suite
"""
def state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.state_callback_value = state
self.file_ingested_value = file_ingested
def pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.publish_callback_value = pub
def exception_callback(self, exception):
""" Callback method to watch what comes in via the exception callback """
self.exception_callback_value = exception
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED: {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppMetadataRecoveredDataParticle,
DATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppInstrumentRecoveredDataParticle,
}
},
DataTypeKey.DOSTA_ABCDJM_CSPP_TELEMETERED: {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_cspp',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppMetadataTelemeteredDataParticle,
DATA_PARTICLE_CLASS_KEY: DostaAbcdjmCsppInstrumentTelemeteredDataParticle,
}
},
}
# Define test data particles and their associated timestamps which will be
# compared with returned results
self.file_ingested_value = None
self.state_callback_value = None
self.publish_callback_value = None
self.exception_callback_value = None
def particle_to_yml(self, particles, filename, mode='w'):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' %(i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.6f\n' % (val.get('value_id'), val.get('value')))
elif isinstance(val.get('value'), str):
fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def create_yml(self):
"""
This utility creates a yml file
"""
fid = open(os.path.join(RESOURCE_PATH, '11194982_PPD_OPT.txt'))
test_buffer = fid.read()
fid.close()
self.stream_handle = StringIO(test_buffer)
self.parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_TELEMETERED),
None, self.stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = self.parser.get_records(20)
log.info("Exception callback value: %s", self.exception_callback_value)
self.particle_to_yml(particles, '11194982_PPD_OPT.yml')
def test_simple(self):
"""
Read test data and pull out data particles.
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(5)
log.info("Exception callback value: %s", self.exception_callback_value)
self.assertTrue(self.exception_callback_value is None)
self.assertTrue(len(particles) == 5)
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
for i in range(len(particles)):
self.assert_result(expected_results['data'][i], particles[i])
stream_handle.close()
def test_get_many(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
# Let's attempt to retrieve 20 particles
particles = parser.get_records(20)
log.info("Exception callback value: %s", self.exception_callback_value)
# Should end up with 20 particles
self.assertTrue(len(particles) == 20)
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
for i in range(len(particles)):
self.assert_result(expected_results['data'][i], particles[i])
def test_long_stream(self):
"""
Test a long stream
"""
file_path = os.path.join(RESOURCE_PATH, '11079364_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
# Let's attempt to retrieve 2000 particles
particles = parser.get_records(300)
log.info("Num particles: %s", len(particles))
log.info("Exception callback value: %s", self.exception_callback_value)
# Should end up with 272 particles
self.assertTrue(len(particles) == 272)
stream_handle.close()
def test_state_after_one_record_retrieval(self):
"""
This test makes sure that we get the correct particles upon requesting one record at
a time.
"""
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 1)
log.info("11111111 Read State: %s", parser._read_state)
log.info("11111111 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][0], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("22222222 Read State: %s", parser._read_state)
log.info("22222222 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][1], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("33333333 Read State: %s", parser._read_state)
log.info("33333333 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 425, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][2], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("44444444 Read State: %s", parser._read_state)
log.info("44444444 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 518, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][3], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("55555555 Read State: %s", parser._read_state)
log.info("55555555 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 611, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][4], particles[0])
def test_state_after_two_record_retrievals(self):
"""
This test makes sure that we get the correct particles upon requesting two records at
a time.
"""
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(2)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 2)
for i in range(len(particles)):
self.assert_result(expected_results['data'][i], particles[i])
log.info("11111111 Read State: %s", parser._read_state)
log.info("11111111 State: %s", parser._state)
the_new_state = {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True}
log.info("11111111 new parser state: %s", the_new_state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
the_new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(2)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 2)
for i in range(len(particles)):
self.assert_result(expected_results['data'][i+2], particles[i])
log.info("22222222 Read State: %s", parser._read_state)
log.info("22222222 State: %s", parser._state)
the_new_state = {StateKey.POSITION: 480, StateKey.METADATA_EXTRACTED: True}
log.info("22222222 new parser state: %s", the_new_state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
the_new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(2)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 2)
for i in range(len(particles)):
self.assert_result(expected_results['data'][i+4], particles[i])
log.info("33333333 Read State: %s", parser._read_state)
log.info("33333333 State: %s", parser._state)
def test_position_and_metadata_extracted_state(self):
"""
This test makes sure that we retrieve the metadata record upon resetting the state position to 0
and setting the METADATA_EXTRACTED to False.
"""
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 1)
log.info("11111111 Read State: %s", parser._read_state)
log.info("11111111 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][0], particles[0])
new_state = {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: False}
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
self.assertTrue(len(particles) == 1)
log.info("22222222 Read State: %s", parser._read_state)
log.info("22222222 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][0], particles[0])
new_state = {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True}
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
self.assertTrue(len(particles) == 1)
log.info("22222222 Read State: %s", parser._read_state)
log.info("22222222 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][1], particles[0])
def test_midstate_start(self):
"""
This test makes sure that we retrieve the correct particles upon starting with an offsetted state.
"""
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
initial_state = {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True}
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
initial_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(2)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 2)
for i in range(len(particles)):
self.assert_result(expected_results['data'][i+2], particles[i])
log.info("******** Read State: %s", parser._read_state)
log.info("******** State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 518, StateKey.METADATA_EXTRACTED: True})
def test_state_reset(self):
"""
This test makes sure that we retrieve the correct particles upon resetting the state to a prior position.
"""
expected_results = self.get_dict_from_yml('11079894_PPB_OPT.yml')
file_path = os.path.join(RESOURCE_PATH, '11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("Num particles: %s", len(particles))
self.assertTrue(len(particles) == 1)
log.info("11111111 Read State: %s", parser._read_state)
log.info("11111111 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][0], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("22222222 Read State: %s", parser._read_state)
log.info("22222222 State: %s", parser._state)
self.assertTrue(len(particles) == 1)
self.assertTrue(parser._state == {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][1], particles[0])
new_state = copy.copy(parser._state)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(1)
log.info("33333333 Read State: %s", parser._read_state)
log.info("33333333 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 425, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][2], particles[0])
new_state = {StateKey.POSITION: 0, StateKey.METADATA_EXTRACTED: True}
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
new_state, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
# Now retrieve two particles. We should end up with the metadata and first data record
particles = parser.get_records(1)
log.info("44444444 Read State: %s", parser._read_state)
log.info("44444444 State: %s", parser._state)
self.assertTrue(parser._state == {StateKey.POSITION: 332, StateKey.METADATA_EXTRACTED: True})
self.assert_result(expected_results['data'][1], particles[0])
def test_bad_data_record(self):
"""
Ensure that bad data is skipped when it exists.
"""
file_path = os.path.join(RESOURCE_PATH, 'BadDataRecord_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
log.info(self.exception_callback_value)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
parser.get_records(1)
log.info("Exception callback value: %s", self.exception_callback_value)
self.assertTrue(self.exception_callback_value is not None)
stream_handle.close()
def test_bad_header_source_file_name(self):
"""
Ensure that bad data is skipped when it exists.
"""
file_path = os.path.join(RESOURCE_PATH, 'BadHeaderSourceFileName_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
log.info(self.exception_callback_value)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
parser.get_records(1)
log.info("Exception callback value: %s", self.exception_callback_value)
self.assertTrue(self.exception_callback_value != None)
stream_handle.close()
def test_bad_header_start_date(self):
"""
Ensure that bad data is skipped when it exists.
"""
file_path = os.path.join(RESOURCE_PATH, 'BadHeaderProcessedData_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
log.info(self.exception_callback_value)
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
# parser should return metadata without start date filled in
parser.get_records(1)
self.assertEqual(self.exception_callback_value, None)
stream_handle.close()
def test_linux_source_path_handling(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
file_path = os.path.join(RESOURCE_PATH, 'linux_11079894_PPB_OPT.txt')
stream_handle = open(file_path, 'rb')
parser = DostaAbcdjmCsppParser(self.config.get(DataTypeKey.DOSTA_ABCDJM_CSPP_RECOVERED),
None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
particles = parser.get_records(5)
self.assertTrue(len(particles) == 5)
expected_results = self.get_dict_from_yml('linux.yml')
for i in range(len(particles)):
self.assert_result(expected_results['data'][i], particles[i])
stream_handle.close()
def assert_result(self, test, particle):
"""
Suite of tests to run against each returned particle and expected
results of the same. The test parameter should be a dictionary
that contains the keys to be tested in the particle
the 'internal_timestamp' and StateKey.POSITION keys are
treated differently than others but can be verified if supplied
"""
particle_dict = particle.generate_dict()
#for efficiency turn the particle values list of dictionaries into a dictionary
particle_values = {}
for param in particle_dict.get('values'):
particle_values[param['value_id']] = param['value']
# compare each key in the test to the data in the particle
for key in test:
expected_results = test[key]
#get the correct data to compare to the test
if key == 'internal_timestamp':
particle_data = particle.get_value('internal_timestamp')
#the timestamp is in the header part of the particle
# log.info("internal_timestamp %.10f", particle_data)
else:
particle_data = particle_values.get(key)
#others are all part of the parsed values part of the particle
if particle_data is None:
#generally OK to ignore index keys in the test data, verify others
log.warning("\nWarning: assert_result ignoring test key %s, does not exist in particle", key)
else:
log.info("Key: %s", key)
log.info("Expected Results Type: %s: ", type(expected_results))
if isinstance(expected_results, float):
# log.info("Expected data: %.10f", expected_results)
# log.info("Actual data: %.10f", particle_data)
# slightly different test for these values as they are floats.
compare = numpy.abs(expected_results - particle_data) <= 1e-5
self.assertTrue(compare)
else:
# log.info("Expected data: %s", expected_results)
# log.info("Actual data: %s", particle_data)
# otherwise they are all ints and should be exactly equal
self.assertEqual(expected_results, particle_data)
@staticmethod
def get_dict_from_yml(filename):
"""
This utility routine loads the contents of a yml file
into a dictionary
"""
fid = open(os.path.join(RESOURCE_PATH, filename), 'r')
result = yaml.load(fid)
fid.close()
return result
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n\n# Based on previous work by\n# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)\n# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)\n\n# A... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n# Based on previous work by\n# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)\n# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in... | ```python
#!/usr/bin/env python
# Based on previous work by
# Charles Menguy (see: http://stackoverflow.com/questions/10217067/implementing-a-full-python-unix-style-daemon-process)
# and Sander Marechal (see: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/)
# Adapted by M.Hendrix [2015] (deprecated)
# daemon98.py uploads data to the server.
import syslog, traceback
import os, sys, shutil, glob, time, commands
from libdaemon import Daemon
import ConfigParser
import subprocess
DEBUG = False
leaf = os.path.realpath(__file__).split('/')[-2]
class MyDaemon(Daemon):
def run(self):
iniconf = ConfigParser.ConfigParser()
inisection = "98"
home = os.path.expanduser('~')
s = iniconf.read(home + '/' + leaf + '/config.ini')
if DEBUG: print "config file : ", s
if DEBUG: print iniconf.items(inisection)
reportTime = iniconf.getint(inisection, "reporttime")
cycles = iniconf.getint(inisection, "cycles")
samplesperCycle = iniconf.getint(inisection, "samplespercycle")
flock = iniconf.get(inisection, "lockfile")
samples = samplesperCycle * cycles # total number of samples averaged
sampleTime = reportTime/samplesperCycle # time [s] between samples
cycleTime = samples * sampleTime # time [s] per cycle
myname = os.uname()[1]
mount_path = '/srv/array1/dataspool/'
remote_path = mount_path + myname
remote_lock = remote_path + '/client.lock'
while True:
try:
startTime=time.time()
if os.path.exists(remote_path):
do_mv_data(remote_path)
else:
if DEBUG:print remote_path + " not available"
waitTime = sampleTime - (time.time() - startTime) - (startTime%sampleTime)
if (waitTime > 0):
if DEBUG:print "Waiting {0} s".format(waitTime)
time.sleep(waitTime)
except Exception as e:
if DEBUG:
print "Unexpected error:"
print e.message
syslog.syslog(syslog.LOG_ALERT,e.__doc__)
syslog_trace(traceback.format_exc())
raise
def do_mv_data(rpath):
hostlock = rpath + '/host.lock'
clientlock = rpath + '/client.lock'
count_internal_locks=1
# wait 5 seconds for processes to finish
time.sleep(5)
while os.path.isfile(hostlock):
if DEBUG:print "hostlock exists"
# wait while the server has locked the directory
time.sleep(1)
# server already sets the client.lock. Do it anyway.
lock(clientlock)
# prevent race conditions
while os.path.isfile(hostlock):
if DEBUG:print "hostlock exists. WTF?"
# wait while the server has locked the directory
time.sleep(1)
while (count_internal_locks > 0):
time.sleep(1)
count_internal_locks=0
for fname in glob.glob(r'/tmp/' + leaf + '/*.lock'):
count_internal_locks += 1
if DEBUG:print "{0} internal locks exist".format(count_internal_locks)
for fname in glob.glob(r'/tmp/' + leaf + '/*.csv'):
if os.path.isfile(clientlock) and not (os.path.isfile(rpath + "/" + os.path.split(fname)[1])):
if DEBUG:print "moving data " + fname
shutil.move(fname, rpath)
unlock(clientlock)
if DEBUG:print "unlocked..."
def lock(fname):
fd = open(fname, 'a').close()
def unlock(fname):
if os.path.isfile(fname):
os.remove(fname)
def syslog_trace(trace):
# Log a python stack trace to syslog
log_lines = trace.split('\n')
for line in log_lines:
if line:
syslog.syslog(syslog.LOG_ALERT,line)
if __name__ == "__main__":
daemon = MyDaemon('/tmp/' + leaf + '/98.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'foreground' == sys.argv[1]:
# assist with debugging.
print "Debug-mode started. Use <Ctrl>+C to stop."
DEBUG = True
if DEBUG:
logtext = "Daemon logging is ON"
syslog.syslog(syslog.LOG_DEBUG, logtext)
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: {0!s} start|stop|restart|foreground".format(sys.argv[0])
sys.exit(2)
``` |
[
{
"content": "Here is the code content:\n```python\n# Copyright 2015 Internap.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# Copyright 2015 Internap.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/li... | ```python
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import unittest
from functools import wraps
from unittest import SkipTest
from netman import raw_or_json
from netman.main import app
class ConfiguredTestCase(unittest.TestCase):
switch_specs = None
def setUp(self):
tested_switch = type(self).switch_specs
self.switch_hostname = tested_switch["hostname"]
self.switch_port = tested_switch["port"]
self.switch_type = tested_switch["model"]
self.switch_username = tested_switch["username"]
self.switch_password = tested_switch["password"]
self.test_port = tested_switch["test_port_name"]
def get(self, relative_url, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.get(**self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return json.loads(r.data)
def post(self, relative_url, data=None, raw_data=None, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.post(data=raw_or_json(raw_data, data), **self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def put(self, relative_url, data=None, raw_data=None, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.put(data=raw_or_json(raw_data, data), **self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def delete(self, relative_url, fail_on_bad_code=True):
with app.test_client() as http_client:
r = http_client.delete(**self.request(relative_url))
if fail_on_bad_code and r.status_code >= 400:
raise AssertionError("Call to %s returned %s : %s" % (relative_url, r.status_code, r.data))
return r
def request(self, relative_url):
logging.info("Querying " + ("http://netman.example.org%s" % relative_url.format(switch=self.switch_hostname, port=self.test_port)))
headers = {
'Netman-Model': self.switch_type,
'Netman-Username': self.switch_username,
'Netman-Password': self.switch_password,
'Netman-Port': self.switch_port
}
return {
"path": relative_url.format(switch=self.switch_hostname, port=self.test_port),
"headers": headers
}
def get_vlan(self, number):
data = self.get("/switches/{switch}/vlans")
vlan = next((vlan for vlan in data if vlan["number"] == number), None)
if not vlan:
raise AssertionError("Vlan #{} not found".format(number))
return vlan
def skip_on_switches(*to_skip):
def resource_decorator(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
if not self.switch_type in to_skip:
return fn(self, *args, **kwargs)
else:
raise SkipTest('Test not executed on Switch model %s' % self.switch_type)
return wrapper
return resource_decorator
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# Copyright (C) 2015-2017 Jan Blechta\n#\n# This file is part of FENaPack.\n#\n# FENaPack is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, eit... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# Copyright (C) 2015-2017 Jan Blechta\n#\n# This file is part of FENaPack.\n#\n# FENaPack is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software... | ```python
# Copyright (C) 2015-2017 Jan Blechta
#
# This file is part of FENaPack.
#
# FENaPack is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FENaPack is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
"""Compiled extensions for fieldsplit modules"""
from dolfin import compile_cpp_code
import petsc4py
import os
__all__ = ['dofmap_dofs_is', 'SubfieldBC']
dofmap_dofs_is_cpp_code = """
#include <pybind11/pybind11.h>
#include <pybind11/eigen.h>
namespace py = pybind11;
#include <vector>
#include <petscis.h>
#include <dolfin/fem/GenericDofMap.h>
#include <dolfin/la/PETScObject.h>
#include <dolfin/log/log.h>
#include "petsc_casters.h"
IS dofmap_dofs_is(const dolfin::GenericDofMap& dofmap)
{
PetscErrorCode ierr;
const std::vector<dolfin::la_index> dofs = dofmap.dofs();
IS is;
dolfin_assert(dofmap.index_map());
ierr = ISCreateGeneral(dofmap.index_map()->mpi_comm(), dofs.size(),
dofs.data(), PETSC_COPY_VALUES, &is);
if (ierr != 0)
dolfin::PETScObject::petsc_error(ierr, "field_split.py", "ISCreateGeneral");
return is;
}
PYBIND11_MODULE(SIGNATURE, m)
{
m.def("dofmap_dofs_is", &dofmap_dofs_is);
}
namespace pybind11
{
namespace detail
{
PETSC_CASTER_MACRO(IS, is);
}
}
"""
# Load and wrap compiled function dofmap_dofs_is
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
module_dofs = compile_cpp_code(dofmap_dofs_is_cpp_code,
include_dirs=[path, petsc4py.get_include()])
def dofmap_dofs_is(dofmap):
"""Converts DofMap::dofs() to IS.
This function is intended to circumvent NumPy which would be
involved in code like::
iset = PETSc.IS().createGeneral(dofmap.dofs(),
comm=dofmap.index_map().mpi_comm())
"""
iset = module_dofs.dofmap_dofs_is(dofmap)
iset.decRef()
assert iset.getRefCount() == 1
return iset
dofmap_dofs_is.__doc__ += module_dofs.dofmap_dofs_is.__doc__
# Load compiled class SubfieldBC
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
code = open(os.path.join(path, "SubfieldBC.h")).read()
module_bc = compile_cpp_code(code, include_dirs=[path, petsc4py.get_include()])
SubfieldBC = module_bc.SubfieldBC
``` |
[
{
"content": "Here is the code block:\n```python\n# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2017 Tijme Gommers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Softwa... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2017 Tijme Gommers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\... | ```python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from nyawc.helpers.URLHelper import URLHelper
class TestUrlHelper(unittest.TestCase):
"""The TestUrlHelper class checks if the methods in the URLHelper work correctly."""
def test_make_absolute(self):
"""Check if the make absolute method works correctly."""
host = "https://example.ltd/current"
tests = [
("https://example.ltd/new.html", "new.html"),
("https://example.ltd/new", "new"),
("https://example.ltd/new1/new2", "new1/new2"),
("https://example.ltd/new1/new3", "/new1/new3"),
("https://example.ltd/current?a=a", "?a=a")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_make_absolute_with_base(self):
"""Check if the make absolute method works correctly in interpreted with a base URL."""
host = "https://example.ltd/base/"
tests = [
("https://example.ltd/base/new.html", "new.html"),
("https://example.ltd/base/new", "new"),
("https://example.ltd/base/new1/new2", "new1/new2"),
("https://example.ltd/new1/new2", "/new1/new2"),
("https://example.ltd/base/?a=a", "?a=a")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_make_absolute_folder_traversal(self):
"""Ensure folder traversal works correclty."""
host = "https://example.ltd/dir1/dir2/dir3"
tests = [
("https://example.ltd/dir1/dir2", "../"),
("https://example.ltd/dir1", "../../"),
("https://example.ltd", "../../../"),
("https://example.ltd", "../../../../"),
("https://example.ltd", "../../../../../")
]
for test in tests:
self.assertEqual(URLHelper.make_absolute(host, test[1]), test[0])
def test_get_protocol(self):
"""Check if the get protocol method works correctly."""
tests = [
("", "domain.tld"),
("http", "http://domain.tld"),
("arbitrary", "arbitrary://omain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_protocol(test[1]), test[0])
def test_get_subdomain(self):
"""Check if the get subdomain method works correctly."""
tests = [
("", ""),
("", "http://"),
("", "http://domain"),
("", "http://domain.tld"),
("sub1", "http://sub1.domain.tld"),
("sub2.sub1", "http://sub2.sub1.domain.tld"),
("sub3.sub2.sub1", "http://sub3.sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_subdomain(test[1]), test[0])
def test_get_hostname(self):
"""Check if the get hostname method works correctly."""
tests = [
("", ""),
("", "http://"),
("domain", "http://domain"),
("domain", "http://domain.tld"),
("domain", "http://sub1.domain.tld"),
("domain", "http://sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_hostname(test[1]), test[0])
def test_get_tld(self):
"""Check if the get tld method works correctly."""
tests = [
("", ""),
("", "http://"),
("", "http://domain"),
("tld", "http://domain.tld"),
("tld", "http://sub1.domain.tld"),
("tld", "http://sub2.sub1.domain.tld")
]
for test in tests:
self.assertEqual(URLHelper.get_tld(test[1]), test[0])
def test_get_ordered_params(self):
    """Check if the get ordered params method works correctly."""
    # Two URLs with the same parameters in different order must
    # normalise to the same result.
    first = URLHelper.get_ordered_params("http://example.tld?a=a&c=c&b=b&d=d")
    second = URLHelper.get_ordered_params("http://sub.domain.ltd?c=c&b=b&a=a&d=d")
    self.assertEqual(first, second)
def test_append_with_data_encoded_and_decoded(self):
    """Make sure values do not get decoded or encoded."""
    decoded = URLHelper.append_with_data("http://example.tld/", {"val": "{{aaaa}}"})
    encoded = URLHelper.append_with_data("http://example.tld/", {"val": "%7B%7Baaaa%7D%7D"})
    self.assertEqual(decoded, "http://example.tld/?val={{aaaa}}")
    self.assertEqual(encoded, "http://example.tld/?val=%7B%7Baaaa%7D%7D")
``` |
[
{
"content": "Write the code verbatim:\n```python\nfrom __future__ import (absolute_import, division, print_function)\n\n\nimport sys\nimport warnings\ntry:\n from setuptools import setup\nexcept ImportError:\n try:\n from setuptools.core import setup\n except ImportError:\n from distutil... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nfrom __future__ import (absolute_import, division, print_function)\n\n\nimport sys\nimport warnings\ntry:\n from setuptools import setup\nexcept ImportError:\n try:\n from setuptools.core import setup\n except ImportError:\n ... | ```python
from __future__ import (absolute_import, division, print_function)


import sys
import warnings
# Prefer setuptools; fall back through the legacy import paths.
try:
    from setuptools import setup
except ImportError:
    try:
        from setuptools.core import setup
    except ImportError:
        from distutils.core import setup
# NOTE(review): this unconditional import rebinds `setup` to the
# distutils version regardless of the fallback chain above — looks
# unintended; confirm before relying on setuptools-only features.
from distutils.core import setup, Extension
import numpy

# Version components; ISRELEASED/SNAPSHOT control how the full
# version string is derived below.
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
SNAPSHOT = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''

FULLVERSION = VERSION
print(FULLVERSION)
if not ISRELEASED:
    import subprocess
    FULLVERSION += '.dev'
    if SNAPSHOT:
        pipe = None
        # Try both the POSIX and the Windows git executables.
        for cmd in ['git', 'git.cmd']:
            try:
                pipe = subprocess.Popen([cmd, "describe", "--always",
                                         "--match", "v[0-9\/]*"],
                                        stdout=subprocess.PIPE)
                (so, serr) = pipe.communicate()
                print(so, serr)
                if pipe.returncode == 0:
                    pass
                print('here')
            except:
                # Executable not found / failed; try the next candidate.
                pass
            if pipe is None or pipe.returncode != 0:
                warnings.warn("WARNING: Couldn't get git revision, "
                              "using generic version string")
            else:
                rev = so.strip()
                # makes distutils blow up on Python 2.7
                if sys.version_info[0] >= 3:
                    rev = rev.decode('ascii')
                # use result of git describe as version string
                FULLVERSION = VERSION + '-' + rev.lstrip('v')
                break
else:
    # Released build: append the (possibly empty) qualifier.
    FULLVERSION += QUALIFIER

setup(
    name='vistools',
    version=FULLVERSION,
    author='Brookhaven National Lab',
    packages=['vistools'],
)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Dhaval Patel\n# Copyright (C) 2011 - TODAY Denero Team. (<http://www.deneroteam.com>)\n#\n# This program is free software: you can redistribute it and/or modify... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Dhaval Patel\n# Copyright (C) 2011 - TODAY Denero Team. (<http://www.deneroteam.com>)\n#\n# This program is free software: you can redistribute ... | ```python
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Dhaval Patel
# Copyright (C) 2011 - TODAY Denero Team. (<http://www.deneroteam.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, _
from openerp.exceptions import Warning as UserError
from openerp.tools.safe_eval import safe_eval
import string
class ResUsers(models.Model):
    """Extend ``res.users`` with configurable password-strength rules."""

    _inherit = "res.users"

    def _validate_password(self, password):
        """Validate *password* against the configured complexity rules.

        Rule settings are read from ``ir.config_parameter``; each value
        is stored as a string and ``safe_eval``'d back to a Python value
        ('6' -> 6, 'False' -> False).

        Returns a list of human-readable messages for every rule the
        password fails; an empty list means the password is acceptable.
        """
        password_rules = []
        config_data = {
            'auth_password_min_character': safe_eval(
                self.env['ir.config_parameter'].get_param(
                    'auth_password_settings.auth_password_min_character',
                    '6'
                )),
            'auth_password_has_capital_letter': safe_eval(
                self.env['ir.config_parameter'].get_param(
                    'auth_password_settings.auth_password_has_capital_letter',
                    'False'
                )),
            'auth_password_has_digit': safe_eval(
                self.env['ir.config_parameter'].get_param(
                    'auth_password_settings.auth_password_has_digit',
                    'False'
                )),
            'auth_password_has_special_letter': safe_eval(
                self.env['ir.config_parameter'].get_param(
                    'auth_password_settings.auth_password_has_special_letter',
                    'False'
                )),
        }
        # Each rule callable returns True when satisfied, or (via the
        # short-circuit `or`) a message describing the requirement.
        password_rules.append(
            lambda s:
            len(s) >= config_data.get('auth_password_min_character', 6) or
            _('Has %s or more characters') % (config_data.get(
                'auth_password_min_character', 6)
            )
        )
        if (config_data.get('auth_password_has_capital_letter', False)):
            password_rules.append(
                lambda s: any(x.isupper() for x in s) or
                _('Has at least One Capital letter')
            )
        if (config_data.get('auth_password_has_digit', False)):
            password_rules.append(
                lambda s: any(x.isdigit() for x in s) or
                _('Has one Number')
            )
        if (config_data.get('auth_password_has_special_letter', False)):
            password_rules.append(
                lambda s: any(x in string.punctuation for x in s) or
                _('Has one Special letter')
            )
        # Keep only failure messages (True means the rule passed).
        problems = [
            p for p in [
                r(password) for r in password_rules
            ] if p and p is not True]
        return problems

    @api.multi
    def write(self, values):
        """Reject password updates that fail the configured rules."""
        if('password' in values):
            problems = self._validate_password(values['password'])
            if(problems):
                raise UserError(
                    _("Password must match following rules\n %s ")
                    % ("\n-- ".join(problems))
                )
        return super(ResUsers, self).write(values)

    @api.multi
    def _set_password(self, password):
        """Validate *password* before delegating to the standard setter."""
        if(password):
            problems = self._validate_password(password)
            if(problems):
                raise UserError(
                    _("Password must match following rules\n %s ")
                    % ("\n-- ".join(problems))
                )
        return super(ResUsers, self)._set_password(password)
``` |
[
{
"content": "Here is the source code:\n```python\n# Copyright (c) 2013, Menno Smits\n# Released subject to the New BSD License\n# Please see http://en.wikipedia.org/wiki/BSD_licenses\n\nfrom __future__ import unicode_literals\n\nimport imaplib\nimport select\nimport socket\nimport sys\nimport warnings\nfrom da... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n# Copyright (c) 2013, Menno Smits\n# Released subject to the New BSD License\n# Please see http://en.wikipedia.org/wiki/BSD_licenses\n\nfrom __future__ import unicode_literals\n\nimport imaplib\nimport select\nimport socket\nimport sys\nimport w... | ```python
# Copyright (c) 2013, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
from __future__ import unicode_literals
import imaplib
import select
import socket
import sys
import warnings
from datetime import datetime
from operator import itemgetter
from . import response_lexer
# Confusingly, this module is for OAUTH v1, not v2
try:
import oauth2 as oauth_module
except ImportError:
oauth_module = None
from .imap_utf7 import encode as encode_utf7, decode as decode_utf7
from .fixed_offset import FixedOffset
from .six import moves, iteritems, text_type, integer_types, PY3, binary_type
xrange = moves.xrange
if PY3:
long = int # long is just int in python3
__all__ = ['IMAPClient', 'DELETED', 'SEEN', 'ANSWERED', 'FLAGGED', 'DRAFT', 'RECENT']
from .response_parser import parse_response, parse_fetch_response
# We also offer the gmail-specific XLIST command...
if 'XLIST' not in imaplib.Commands:
imaplib.Commands['XLIST'] = imaplib.Commands['LIST']
# ...and IDLE
if 'IDLE' not in imaplib.Commands:
imaplib.Commands['IDLE'] = imaplib.Commands['APPEND']
# System flags
DELETED = r'\Deleted'
SEEN = r'\Seen'
ANSWERED = r'\Answered'
FLAGGED = r'\Flagged'
DRAFT = r'\Draft'
RECENT = r'\Recent' # This flag is read-only
class Namespace(tuple):
    """Immutable (personal, other, shared) triple of IMAP namespaces.

    Elements are accessible either positionally or through the
    ``personal``, ``other`` and ``shared`` attributes.
    """

    def __new__(cls, personal, other, shared):
        return tuple.__new__(cls, (personal, other, shared))

    @property
    def personal(self):
        return self[0]

    @property
    def other(self):
        return self[1]

    @property
    def shared(self):
        return self[2]
class IMAPClient(object):
"""
A connection to the IMAP server specified by *host* is made when
the class is instantiated.
*port* defaults to 143, or 993 if *ssl* is ``True``.
If *use_uid* is ``True`` unique message UIDs be used for all calls
that accept message ids (defaults to ``True``).
If *ssl* is ``True`` an SSL connection will be made (defaults to
``False``).
If *stream* is ``True`` then *host* is used as the command to run
to establish a connection to the IMAP server (defaults to
``False``). This is useful for exotic connection or authentication
setups.
The *normalise_times* attribute specifies whether datetimes
returned by ``fetch()`` are normalised to the local system time
and include no timezone information (native), or are datetimes
that include timezone information (aware). By default
*normalise_times* is True (times are normalised to the local
system time). This attribute can be changed between ``fetch()``
calls if required.
The *debug* property can be used to enable debug logging. It can
be set to an integer from 0 to 5 where 0 disables debug output and
5 enables full output with wire logging and parsing logs. ``True``
and ``False`` can also be assigned where ``True`` sets debug level
4.
By default, debug output goes to stderr. The *log_file* attribute
can be assigned to an alternate file handle for writing debug
output to.
"""
Error = imaplib.IMAP4.error
AbortError = imaplib.IMAP4.abort
ReadOnlyError = imaplib.IMAP4.readonly
def __init__(self, host, port=None, use_uid=True, ssl=False, stream=False):
    """Create a client for *host*; see the class docstring for options."""
    if stream:
        # Stream mode runs *host* as a command, so a port or SSL
        # setting is contradictory.
        if port is not None:
            raise ValueError("can't set 'port' when 'stream' True")
        if ssl:
            raise ValueError("can't use 'ssl' when 'stream' is True")
    elif port is None:
        # Default port depends on whether SSL is requested.
        port = ssl and 993 or 143
    self.host = host
    self.port = port
    self.ssl = ssl
    self.stream = stream
    self.use_uid = use_uid
    self.folder_encode = True  # encode/decode folder names as modified UTF-7
    self.log_file = sys.stderr  # destination for debug output
    self.normalise_times = True  # normalise fetched datetimes to local time
    self._cached_capabilities = None  # filled lazily by capabilities()
    self._imap = self._create_IMAP4()
    self._imap._mesg = self._log  # patch in custom debug log method
    self._idle_tag = None  # tag of an in-progress IDLE command, if any
def _create_IMAP4(self):
    """Build the underlying imaplib connection object.

    Kept as a separate factory method so unit tests can substitute it.
    """
    if self.stream:
        return imaplib.IMAP4_stream(self.host)
    if self.ssl:
        return imaplib.IMAP4_SSL(self.host, self.port)
    return imaplib.IMAP4(self.host, self.port)
def login(self, username, password):
"""Login using *username* and *password*, returning the
server response.
"""
return self._command_and_check('login', username, password, unpack=True)
def oauth_login(self, url, oauth_token, oauth_token_secret,
consumer_key='anonymous', consumer_secret='anonymous'):
"""Authenticate using the OAUTH method.
This only works with IMAP servers that support OAUTH (e.g. Gmail).
"""
if oauth_module:
token = oauth_module.Token(oauth_token, oauth_token_secret)
consumer = oauth_module.Consumer(consumer_key, consumer_secret)
xoauth_callable = lambda x: oauth_module.build_xoauth_string(url, consumer, token)
return self._command_and_check('authenticate', 'XOAUTH', xoauth_callable, unpack=True)
else:
raise self.Error('The optional oauth2 package is needed for OAUTH authentication')
def oauth2_login(self, user, access_token):
"""Authenticate using the OAUTH2 method.
This only works with IMAP servers that support OAUTH2 (e.g. Gmail).
"""
auth_string = lambda x: 'user=%s\1auth=Bearer %s\1\1' % (user, access_token)
return self._command_and_check('authenticate', 'XOAUTH2', auth_string)
def logout(self):
"""Logout, returning the server response.
"""
typ, data = self._imap.logout()
data = from_bytes(data)
self._check_resp('BYE', 'logout', typ, data)
return data[0]
def capabilities(self):
    """Returns the server capability list.

    If the session is authenticated and the server has returned a
    CAPABILITY response at authentication time, this response
    will be returned. Otherwise, the CAPABILITY command will be
    issued to the server, with the results cached for future calls.

    If the session is not yet authenticated, the cached
    capabilities determined at connection time will be returned.
    """
    # If a capability response has been cached, use that.
    if self._cached_capabilities:
        return self._cached_capabilities

    # If server returned an untagged CAPABILITY response (during
    # authentication), cache it and return that.
    response = self._imap.untagged_responses.pop('CAPABILITY', None)
    if response:
        return self._save_capabilities(response[0])

    # If authenticated, but we don't have a capability response,
    # ask the server for one.
    if self._imap.state in ('SELECTED', 'AUTH'):
        response = self._command_and_check('capability', unpack=True)
        return self._save_capabilities(response)

    # Just return capabilities that imaplib grabbed at connection
    # time (pre-auth).
    return from_bytes(self._imap.capabilities)
def _save_capabilities(self, raw_response):
raw_response = from_bytes(raw_response)
self._cached_capabilities = tuple(raw_response.upper().split())
return self._cached_capabilities
def has_capability(self, capability):
    """Return ``True`` if the IMAP server advertises *capability*.

    The comparison is case-insensitive.
    """
    # FIXME: this will not detect capabilities that are backwards
    # compatible with the current level. For instance a future SORT2
    # capability, still compatible with SORT, would not be matched
    # by this simple membership test.
    wanted = capability.upper()
    return wanted in self.capabilities()
def namespace(self):
"""Return the namespace for the account as a (personal, other,
shared) tuple.
Each element may be None if no namespace of that type exists,
or a sequence of (prefix, separator) pairs.
For convenience the tuple elements may be accessed
positionally or using attributes named *personal*, *other* and
*shared*.
See :rfc:`2342` for more details.
"""
data = self._command_and_check('namespace')
return Namespace(*parse_response(data))
def get_folder_delimiter(self):
"""Return the folder separator used by the IMAP server.
.. warning::
The implementation just picks the first folder separator
from the first namespace returned. This is not
particularly sensible. Use namespace instead().
"""
warnings.warn(DeprecationWarning('get_folder_delimiter is going away. Use namespace() instead.'))
for part in self.namespace():
for ns in part:
return ns[1]
raise self.Error('could not determine folder separator')
def list_folders(self, directory="", pattern="*"):
"""Get a listing of folders on the server as a list of
``(flags, delimiter, name)`` tuples.
Calling list_folders with no arguments will list all
folders.
Specifying *directory* will limit returned folders to that
base directory. Specifying *pattern* will limit returned
folders to those with matching names. The wildcards are
supported in *pattern*. ``*`` matches zero or more of any
character and ``%`` matches 0 or more characters except the
folder delimiter.
Folder names are always returned as unicode strings, and decoded from
modifier utf-7, except if folder_decode is not set.
"""
return self._do_list('LIST', directory, pattern)
def xlist_folders(self, directory="", pattern="*"):
"""Execute the XLIST command, returning ``(flags, delimiter,
name)`` tuples.
This method returns special flags for each folder and a
localized name for certain folders (e.g. the name of the
inbox may be localized and the flags can be used to
determine the actual inbox, even if the name has been
localized.
A ``XLIST`` response could look something like::
[([u'\\HasNoChildren', u'\\Inbox'], '/', u'Inbox'),
([u'\\Noselect', u'\\HasChildren'], '/', u'[Gmail]'),
([u'\\HasNoChildren', u'\\AllMail'], '/', u'[Gmail]/All Mail'),
([u'\\HasNoChildren', u'\\Drafts'], '/', u'[Gmail]/Drafts'),
([u'\\HasNoChildren', u'\\Important'], '/', u'[Gmail]/Important'),
([u'\\HasNoChildren', u'\\Sent'], '/', u'[Gmail]/Sent Mail'),
([u'\\HasNoChildren', u'\\Spam'], '/', u'[Gmail]/Spam'),
([u'\\HasNoChildren', u'\\Starred'], '/', u'[Gmail]/Starred'),
([u'\\HasNoChildren', u'\\Trash'], '/', u'[Gmail]/Trash')]
This is a Gmail-specific IMAP extension. It is the
responsibility of the caller to either check for ``XLIST`` in
the server capabilites, or to handle the error if the server
doesn't support this extension.
The *directory* and *pattern* arguments are as per
list_folders().
"""
return self._do_list('XLIST', directory, pattern)
def list_sub_folders(self, directory="", pattern="*"):
"""Return a list of subscribed folders on the server as
``(flags, delimiter, name)`` tuples.
The default behaviour will list all subscribed folders. The
*directory* and *pattern* arguments are as per list_folders().
"""
return self._do_list('LSUB', directory, pattern)
def _do_list(self, cmd, directory, pattern):
directory = self._normalise_folder(directory)
pattern = self._normalise_folder(pattern)
typ, dat = self._imap._simple_command(cmd, directory, pattern)
dat = from_bytes(dat)
self._checkok(cmd, typ, dat)
typ, dat = self._imap._untagged_response(typ, dat, cmd)
return self._proc_folder_list(from_bytes(dat))
def _proc_folder_list(self, folder_data):
# Filter out empty strings and None's.
# This also deals with the special case of - no 'untagged'
# responses (ie, no folders). This comes back as [None].
folder_data = [item for item in folder_data if item not in ('', None)]
ret = []
parsed = parse_response(folder_data)
while parsed:
# TODO: could be more efficient
flags, delim, name = parsed[:3]
parsed = parsed[3:]
if isinstance(name, int):
# Some IMAP implementations return integer folder names
# with quotes. These get parsed to ints so convert them
# back to strings.
name = text_type(name)
elif self.folder_encode:
name = decode_utf7(name)
ret.append((flags, delim, name))
return ret
def select_folder(self, folder, readonly=False):
"""Set the current folder on the server.
Future calls to methods such as search and fetch will act on
the selected folder.
Returns a dictionary containing the ``SELECT`` response. At least
the ``EXISTS``, ``FLAGS`` and ``RECENT`` keys are guaranteed
to exist. An example::
{'EXISTS': 3,
'FLAGS': ('\\Answered', '\\Flagged', '\\Deleted', ... ),
'RECENT': 0,
'PERMANENTFLAGS': ('\\Answered', '\\Flagged', '\\Deleted', ... ),
'READ-WRITE': True,
'UIDNEXT': 11,
'UIDVALIDITY': 1239278212}
"""
self._command_and_check('select', self._normalise_folder(folder), readonly)
untagged = self._imap.untagged_responses
return self._process_select_response(from_bytes(untagged))
def _process_select_response(self, resp):
    """Convert raw untagged SELECT responses into a typed dict."""
    converted = {}
    for key, value in iteritems(resp):
        key = key.upper()
        if key == 'OK':
            # Status-only line; nothing to record.
            continue
        if key in ('EXISTS', 'RECENT', 'UIDNEXT', 'UIDVALIDITY', 'HIGHESTMODSEQ'):
            converted[key] = int(value[0])
        elif key in ('FLAGS', 'PERMANENTFLAGS'):
            converted[key] = parse_response(value)[0]
        elif key == 'READ-WRITE':
            converted[key] = True
        else:
            converted[key] = value
    return converted
def noop(self):
"""Execute the NOOP command.
This command returns immediately, returning any server side
status updates. It can also be used to reset any auto-logout
timers.
The return value is the server command response message
followed by a list of status responses. For example::
('NOOP completed.',
[(4, 'EXISTS'),
(3, 'FETCH', ('FLAGS', ('bar', 'sne'))),
(6, 'FETCH', ('FLAGS', ('sne',)))])
"""
tag = self._imap._command('NOOP')
return self._consume_until_tagged_response(tag, 'NOOP')
def idle(self):
"""Put the server into IDLE mode.
In this mode the server will return unsolicited responses
about changes to the selected mailbox. This method returns
immediately. Use ``idle_check()`` to look for IDLE responses
and ``idle_done()`` to stop IDLE mode.
.. note::
Any other commmands issued while the server is in IDLE
mode will fail.
See :rfc:`2177` for more information about the IDLE extension.
"""
self._idle_tag = self._imap._command('IDLE')
resp = from_bytes(self._imap._get_response())
if resp is not None:
raise self.Error('Unexpected IDLE response: %s' % resp)
def idle_check(self, timeout=None):
"""Check for any IDLE responses sent by the server.
This method should only be called if the server is in IDLE
mode (see ``idle()``).
By default, this method will block until an IDLE response is
received. If *timeout* is provided, the call will block for at
most this many seconds while waiting for an IDLE response.
The return value is a list of received IDLE responses. These
will be parsed with values converted to appropriate types. For
example::
[('OK', 'Still here'),
(1, 'EXISTS'),
(1, 'FETCH', ('FLAGS', ('\\NotJunk',)))]
"""
# In py2, imaplib has sslobj (for SSL connections), and sock for non-SSL.
# In the py3 version it's just sock.
sock = getattr(self._imap, 'sslobj', self._imap.sock)
# make the socket non-blocking so the timeout can be
# implemented for this call
sock.setblocking(0)
try:
resps = []
rs, _, _ = select.select([sock], [], [], timeout)
if rs:
while True:
try:
line = from_bytes(self._imap._get_line())
except (socket.timeout, socket.error):
break
except IMAPClient.AbortError:
# An imaplib.IMAP4.abort with "EOF" is raised
# under Python 3
err = sys.exc_info()[1]
if 'EOF' in err.args[0]:
break
else:
raise
else:
resps.append(_parse_untagged_response(line))
return resps
finally:
sock.setblocking(1)
def idle_done(self):
"""Take the server out of IDLE mode.
This method should only be called if the server is already in
IDLE mode.
The return value is of the form ``(command_text,
idle_responses)`` where *command_text* is the text sent by the
server when the IDLE command finished (eg. ``'Idle
terminated'``) and *idle_responses* is a list of parsed idle
responses received since the last call to ``idle_check()`` (if
any). These are returned in parsed form as per
``idle_check()``.
"""
self._imap.send(b'DONE\r\n')
return self._consume_until_tagged_response(self._idle_tag, 'IDLE')
def folder_status(self, folder, what=None):
"""Return the status of *folder*.
*what* should be a sequence of status items to query. This
defaults to ``('MESSAGES', 'RECENT', 'UIDNEXT', 'UIDVALIDITY',
'UNSEEN')``.
Returns a dictionary of the status items for the folder with
keys matching *what*.
"""
if what is None:
what = ('MESSAGES', 'RECENT', 'UIDNEXT', 'UIDVALIDITY', 'UNSEEN')
else:
what = normalise_text_list(what)
what_ = '(%s)' % (' '.join(what))
data = self._command_and_check('status', self._normalise_folder(folder), what_, unpack=True)
_, status_items = parse_response([data])
return dict(as_pairs(status_items))
def close_folder(self):
"""Close the currently selected folder, returning the server
response string.
"""
return self._command_and_check('close', unpack=True)
def create_folder(self, folder):
"""Create *folder* on the server returning the server response string.
"""
return self._command_and_check('create', self._normalise_folder(folder), unpack=True)
def rename_folder(self, old_name, new_name):
"""Change the name of a folder on the server.
"""
return self._command_and_check('rename',
self._normalise_folder(old_name),
self._normalise_folder(new_name),
unpack=True)
def delete_folder(self, folder):
"""Delete *folder* on the server returning the server response string.
"""
return self._command_and_check('delete', self._normalise_folder(folder), unpack=True)
def folder_exists(self, folder):
    """Return ``True`` if *folder* exists on the server."""
    data = self._command_and_check('list', '""', self._normalise_folder(folder))
    # Drop empty/None placeholder entries before counting matches.
    data = [x for x in data if x]
    # Exactly one non-empty LIST response means the folder exists.
    # (PEP 8: compare to None with "is not", never "!=".)
    return len(data) == 1 and data[0] is not None
def subscribe_folder(self, folder):
"""Subscribe to *folder*, returning the server response string.
"""
return self._command_and_check('subscribe', self._normalise_folder(folder))
def unsubscribe_folder(self, folder):
"""Unsubscribe to *folder*, returning the server response string.
"""
return self._command_and_check('unsubscribe', self._normalise_folder(folder))
def search(self, criteria='ALL', charset=None):
"""Return a list of messages ids matching *criteria*.
*criteria* should be a list of of one or more criteria
specifications or a single critera string. Example values
include::
'NOT DELETED'
'UNSEEN'
'SINCE 1-Feb-2011'
*charset* specifies the character set of the strings in the
criteria. It defaults to US-ASCII.
See :rfc:`3501#section-6.4.4` for more details.
"""
criteria = normalise_search_criteria(criteria)
if self.use_uid:
if charset:
args = ['CHARSET', charset]
else:
args = []
args.extend(criteria)
typ, data = self._imap.uid('SEARCH', *args)
else:
typ, data = self._imap.search(charset, *criteria)
data = from_bytes(data)
self._checkok('search', typ, data)
data = data[0]
if data is None: # no untagged responses...
return []
return [long(i) for i in data.split()]
def thread(self, algorithm='REFERENCES', criteria='ALL', charset='UTF-8'):
"""Return a list of messages threads matching *criteria*.
Each thread is a list of messages ids.
See :rfc:`5256` for more details.
"""
if not self.has_capability('THREAD=' + algorithm):
raise ValueError('server does not support %s threading algorithm'
% algorithm)
if not criteria:
raise ValueError('no criteria specified')
args = [algorithm]
if charset:
args.append(charset)
args.extend(normalise_search_criteria(criteria))
data = self._command_and_check('thread', *args, uid=True)
return parse_response(data)
def sort(self, sort_criteria, criteria='ALL', charset='UTF-8'):
"""Return a list of message ids sorted by *sort_criteria* and
optionally filtered by *criteria*.
Example values for *sort_criteria* include::
ARRIVAL
REVERSE SIZE
SUBJECT
The *criteria* argument is as per search().
See :rfc:`5256` for full details.
Note that SORT is an extension to the IMAP4 standard so it may
not be supported by all IMAP servers.
"""
if not criteria:
raise ValueError('no criteria specified')
if not self.has_capability('SORT'):
raise self.Error('The server does not support the SORT extension')
ids = self._command_and_check('sort',
seq_to_parenstr_upper(sort_criteria),
charset,
*normalise_search_criteria(criteria),
uid=True, unpack=True)
return [long(i) for i in ids.split()]
def get_flags(self, messages):
"""Return the flags set for each message in *messages*.
The return value is a dictionary structured like this: ``{
msgid1: [flag1, flag2, ... ], }``.
"""
response = self.fetch(messages, ['FLAGS'])
return self._filter_fetch_dict(response, 'FLAGS')
def add_flags(self, messages, flags):
"""Add *flags* to *messages*.
*flags* should be a sequence of strings.
Returns the flags set for each modified message (see
*get_flags*).
"""
return self._store('+FLAGS', messages, flags, 'FLAGS')
def remove_flags(self, messages, flags):
"""Remove one or more *flags* from *messages*.
*flags* should be a sequence of strings.
Returns the flags set for each modified message (see
*get_flags*).
"""
return self._store('-FLAGS', messages, flags, 'FLAGS')
def set_flags(self, messages, flags):
"""Set the *flags* for *messages*.
*flags* should be a sequence of strings.
Returns the flags set for each modified message (see
*get_flags*).
"""
return self._store('FLAGS', messages, flags, 'FLAGS')
def get_gmail_labels(self, messages):
"""Return the label set for each message in *messages*.
The return value is a dictionary structured like this: ``{
msgid1: [label1, label2, ... ], }``.
This only works with IMAP servers that support the X-GM-LABELS
attribute (eg. Gmail).
"""
response = self.fetch(messages, ['X-GM-LABELS'])
return self._filter_fetch_dict(response, 'X-GM-LABELS')
def add_gmail_labels(self, messages, labels):
"""Add *labels* to *messages*.
*labels* should be a sequence of strings.
Returns the label set for each modified message (see
*get_gmail_labels*).
This only works with IMAP servers that support the X-GM-LABELS
attribute (eg. Gmail).
"""
return self._store('+X-GM-LABELS', messages, labels, 'X-GM-LABELS')
def remove_gmail_labels(self, messages, labels):
"""Remove one or more *labels* from *messages*.
*labels* should be a sequence of strings.
Returns the label set for each modified message (see
*get_gmail_labels*).
This only works with IMAP servers that support the X-GM-LABELS
attribute (eg. Gmail).
"""
return self._store('-X-GM-LABELS', messages, labels, 'X-GM-LABELS')
def set_gmail_labels(self, messages, labels):
"""Set the *labels* for *messages*.
*labels* should be a sequence of strings.
Returns the label set for each modified message (see
*get_gmail_labels*).
This only works with IMAP servers that support the X-GM-LABELS
attribute (eg. Gmail).
"""
return self._store('X-GM-LABELS', messages, labels, 'X-GM-LABELS')
def delete_messages(self, messages):
"""Delete one or more *messages* from the currently selected
folder.
Returns the flags set for each modified message (see
*get_flags*).
"""
return self.add_flags(messages, DELETED)
def fetch(self, messages, data, modifiers=None):
"""Retrieve selected *data* associated with one or more *messages*.
*data* should be specified as a sequnce of strings, one item
per data selector, for example ``['INTERNALDATE',
'RFC822']``.
*modifiers* are required for some extensions to the IMAP
protocol (eg. :rfc:`4551`). These should be a sequnce of strings
if specified, for example ``['CHANGEDSINCE 123']``.
A dictionary is returned, indexed by message number. Each item
in this dictionary is also a dictionary, with an entry
corresponding to each item in *data*.
In addition to an element for each *data* item, the dict
returned for each message also contains a *SEQ* key containing
the sequence number for the message. This allows for mapping
between the UID and sequence number (when the *use_uid*
property is ``True``).
Example::
>> c.fetch([3293, 3230], ['INTERNALDATE', 'FLAGS'])
{3230: {'FLAGS': ('\\Seen',),
'INTERNALDATE': datetime.datetime(2011, 1, 30, 13, 32, 9),
'SEQ': 84},
3293: {'FLAGS': (),
'INTERNALDATE': datetime.datetime(2011, 2, 24, 19, 30, 36),
'SEQ': 110}}
"""
if not messages:
return {}
args = [
'FETCH',
messages_to_str(messages),
seq_to_parenstr_upper(data),
seq_to_parenstr_upper(modifiers) if modifiers else None
]
if self.use_uid:
args.insert(0, 'UID')
tag = self._imap._command(*args)
typ, data = self._imap._command_complete('FETCH', tag)
data = from_bytes(data)
self._checkok('fetch', typ, data)
typ, data = self._imap._untagged_response(typ, data, 'FETCH')
return parse_fetch_response(from_bytes(data), self.normalise_times, self.use_uid)
def append(self, folder, msg, flags=(), msg_time=None):
"""Append a message to *folder*.
*msg* should be a string contains the full message including
headers.
*flags* should be a sequence of message flags to set. If not
specified no flags will be set.
*msg_time* is an optional datetime instance specifying the
date and time to set on the message. The server will set a
time if it isn't specified. If *msg_time* contains timezone
information (tzinfo), this will be honoured. Otherwise the
local machine's time zone sent to the server.
Returns the APPEND response as returned by the server.
"""
if msg_time:
time_val = '"%s"' % datetime_to_imap(msg_time)
if PY3:
time_val = to_unicode(time_val)
else:
time_val = to_bytes(time_val)
else:
time_val = None
return self._command_and_check('append',
self._normalise_folder(folder),
seq_to_parenstr(flags),
time_val,
to_bytes(msg),
unpack=True)
def copy(self, messages, folder):
"""Copy one or more messages from the current folder to
*folder*. Returns the COPY response string returned by the
server.
"""
return self._command_and_check('copy',
messages_to_str(messages),
self._normalise_folder(folder),
uid=True, unpack=True)
def expunge(self):
"""Remove any messages from the currently selected folder that
have the ``\\Deleted`` flag set.
The return value is the server response message
followed by a list of expunge responses. For example::
('Expunge completed.',
[(2, 'EXPUNGE'),
(1, 'EXPUNGE'),
(0, 'RECENT')])
In this case, the responses indicate that the message with
sequence numbers 2 and 1 where deleted, leaving no recent
messages in the folder.
See :rfc:`3501#section-6.4.3` section 6.4.3 and
:rfc:`3501#section-7.4.1` section 7.4.1 for more details.
"""
tag = self._imap._command('EXPUNGE')
return self._consume_until_tagged_response(tag, 'EXPUNGE')
def getacl(self, folder):
    """Returns a list of ``(who, acl)`` tuples describing the
    access controls for *folder*.
    """
    data = self._command_and_check('getacl', self._normalise_folder(folder))
    parts = list(response_lexer.TokenSource(data))
    parts = parts[1:]  # First item is folder name
    # ``range`` works on both Python 2 and 3; ``xrange`` is a
    # Python 2-only builtin and would raise NameError on Python 3.
    return [(parts[i], parts[i + 1]) for i in range(0, len(parts), 2)]
def setacl(self, folder, who, what):
    """Apply the ACL *what* for user *who* on *folder*.

    An empty *what* removes the ACL. Returns the server's response
    string.
    """
    folder_arg = self._normalise_folder(folder)
    return self._command_and_check('setacl', folder_arg, who, what,
                                   unpack=True)
def _check_resp(self, expected, command, typ, data):
    """Check command responses for errors.

    Raises IMAPClient.Error if the command fails.
    """
    if typ == expected:
        return
    raise self.Error('%s failed: %r' % (command, data[0]))
def _consume_until_tagged_response(self, tag, command):
    """Read untagged responses until the completion for *tag* arrives.

    Returns ``(final_response_text, parsed_untagged_responses)``.
    """
    tagged_commands = self._imap.tagged_commands
    resps = []
    while True:
        line = self._imap._get_response()
        # imaplib records the tagged completion in tagged_commands;
        # every line seen before that is an untagged response.
        if tagged_commands[tag]:
            break
        resps.append(_parse_untagged_response(from_bytes(line)))
    typ, data = tagged_commands.pop(tag)
    data = from_bytes(data)
    self._checkok(command, typ, data)
    return data[0], resps
def _command_and_check(self, command, *args, **kwargs):
    """Run an imaplib command, verify an OK response and return the data.

    Supported keyword arguments: ``unpack`` returns only the first
    response item; ``uid`` uses the UID variant of the command when
    UID mode is enabled on this client.
    """
    unpack = pop_with_default(kwargs, 'unpack', False)
    uid = pop_with_default(kwargs, 'uid', False)
    assert not kwargs, "unexpected keyword args: " + ', '.join(kwargs)
    if uid and self.use_uid:
        typ, data = self._imap.uid(command, *args)
    else:
        meth = getattr(self._imap, command)
        typ, data = meth(*args)
    data = from_bytes(data)
    self._checkok(command, typ, data)
    if unpack:
        return data[0]
    return data
def _checkok(self, command, typ, data):
    """Shortcut: assert that *typ* is the 'OK' response for *command*."""
    self._check_resp('OK', command, typ, data)
def _store(self, cmd, messages, flags, fetch_key):
    """Worker function for the various flag manipulation methods.

    *cmd* is the STORE command to use (eg. '+FLAGS').
    Returns a dict mapping message id to the *fetch_key* field of the
    server's response; an empty *messages* short-circuits to ``{}``.
    """
    if not messages:
        return {}
    data = self._command_and_check('store',
                                   messages_to_str(messages),
                                   cmd,
                                   seq_to_parenstr(flags),
                                   uid=True)
    return self._filter_fetch_dict(parse_fetch_response(data), fetch_key)
def _filter_fetch_dict(self, fetch_dict, key):
    """Reduce a fetch-response dict to just the *key* field per message."""
    filtered = {}
    for msgid, data in iteritems(fetch_dict):
        filtered[msgid] = data[key]
    return filtered
def __debug_get(self):
    # Property getter: mirrors the underlying imaplib debug level.
    return self._imap.debug
def __debug_set(self, level):
    """Set the imaplib debug level; booleans map to 4 (on) / 0 (off)."""
    # bool cannot be subclassed, so isinstance(level, bool) is exactly
    # equivalent to the identity checks `level is True / is False`;
    # integer levels (e.g. 1) pass through untouched.
    if isinstance(level, bool):
        level = 4 if level else 0
    self._imap.debug = level
debug = property(__debug_get, __debug_set)
def _log(self, text):
    """Write a timestamped line to the debug log file and flush it."""
    self.log_file.write('%s %s\n' % (datetime.now().strftime('%M:%S.%f'), text))
    self.log_file.flush()
def _normalise_folder(self, folder_name):
    """Prepare a folder name for the wire: decode, optionally IMAP-UTF-7
    encode, then quote it for imaplib."""
    if isinstance(folder_name, binary_type):
        folder_name = folder_name.decode('ascii')
    if self.folder_encode:
        # Modified UTF-7 encoding per RFC 3501 section 5.1.3.
        folder_name = encode_utf7(folder_name)
    return self._imap._quote(folder_name)
def normalise_text_list(items):
    """Return *items* normalised to a list of unicode strings."""
    normalised = _normalise_text_list(items)
    return list(normalised)
def seq_to_parenstr(items):
    """Normalise *items* and join them into a parenthesised string."""
    normalised = _normalise_text_list(items)
    return _join_and_paren(normalised)
def seq_to_parenstr_upper(items):
    """Like seq_to_parenstr, but upper-cases each normalised item."""
    upper_items = (text.upper() for text in _normalise_text_list(items))
    return _join_and_paren(upper_items)
def messages_to_str(messages):
    """Render message id(s) as the comma-separated list IMAP expects.

    Accepts a single id (text, bytes or integer) or any iterable of ids.
    """
    single_types = (text_type, binary_type, integer_types)
    if isinstance(messages, single_types):
        messages = (messages,)
    parts = [_maybe_int_to_unicode(m) for m in messages]
    return ','.join(parts)
def _maybe_int_to_unicode(val):
    """Return *val* as unicode text, stringifying integers."""
    is_int = isinstance(val, integer_types)
    return text_type(val) if is_int else to_unicode(val)
def normalise_search_criteria(criteria):
    """Wrap each normalised criteria item in parentheses.

    Raises ValueError when *criteria* is empty.
    """
    if not criteria:
        raise ValueError('no criteria specified')
    wrapped = []
    for item in _normalise_text_list(criteria):
        wrapped.append('(%s)' % item)
    return wrapped
def _join_and_paren(items):
    """Space-join *items* and wrap the result in parentheses."""
    return '(' + ' '.join(items) + ')'
def _normalise_text_list(items):
    """Return a generator of unicode items; a bare string becomes one item."""
    is_single = isinstance(items, (text_type, binary_type))
    sequence = (items,) if is_single else items
    return (to_unicode(item) for item in sequence)
def datetime_to_imap(dt):
    """Format *dt* as an IMAP INTERNALDATE string (DD-Mon-YYYY HH:MM:SS +ZZZZ).

    A datetime without timezone information is assumed to be in the
    local system timezone.
    """
    if not dt.tzinfo:
        dt = dt.replace(tzinfo=FixedOffset.for_system())
    formatted = dt.strftime("%d-%b-%Y %H:%M:%S %z")
    return formatted
def _parse_untagged_response(text):
    """Parse a single untagged ('* ...') response line."""
    assert text.startswith('* ')
    body = text[2:]
    # Plain status responses come back as a (status, rest) pair.
    for status_prefix in ('OK ', 'NO '):
        if body.startswith(status_prefix):
            return tuple(body.split(' ', 1))
    return parse_response([body])
def pop_with_default(dct, key, default):
    """Remove *key* from *dct* and return its value, or *default* if absent.

    Uses dict.pop's default form, avoiding the membership test plus
    second lookup of the original LBYL version.
    """
    return dct.pop(key, default)
def as_pairs(items):
    """Yield consecutive (even-index, odd-index) pairs from *items*.

    A trailing unpaired item is silently dropped, matching the
    alternating-accumulator behaviour of the original implementation.
    """
    iterator = iter(items)
    while True:
        try:
            first = next(iterator)
            second = next(iterator)
        except StopIteration:
            return
        yield first, second
def to_unicode(s):
    """Decode bytes as ASCII text; any other value passes through unchanged."""
    return s.decode('ascii') if isinstance(s, binary_type) else s
def to_bytes(s):
    """Encode text as ASCII bytes; any other value passes through unchanged."""
    return s.encode('ascii') if isinstance(s, text_type) else s
def from_bytes(data):
    """Recursively convert bytes to str inside lists, tuples and dicts.

    Bytes are decoded as latin-1; scalars of any other type are
    returned unchanged.
    """
    if isinstance(data, dict):
        return dict((from_bytes(key), from_bytes(value))
                    for key, value in iteritems(data))
    if isinstance(data, list):
        return [from_bytes(item) for item in data]
    if isinstance(data, tuple):
        return tuple(from_bytes(item) for item in data)
    if isinstance(data, binary_type):
        return data.decode('latin-1')
    return data
``` |
[
{
"content": "```python\n# Copyright (C) 2020 OpenMotics BV\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later... | [
{
"content": "<|memory_start|>```python\n# Copyright (C) 2020 OpenMotics BV\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your o... | ```python
# Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Ventilation (de)serializer
"""
from __future__ import absolute_import
import logging
from gateway.api.serializers.base import SerializerToolbox
from gateway.dto import VentilationDTO, VentilationSourceDTO, \
VentilationStatusDTO
from toolbox import Toolbox
if False: # MYPY
from typing import Any, Dict, Optional, List, Tuple
logger = logging.getLogger(__name__)
class VentilationSerializer(object):
    """(De)serializer between VentilationDTO and its API dict form."""

    @staticmethod
    def serialize(ventilation_dto, fields):
        # type: (VentilationDTO, Optional[List[str]]) -> Dict[str,Any]
        """Serialize *ventilation_dto* to an API dict, filtered to *fields*."""
        data = {'id': ventilation_dto.id,
                'external_id': Toolbox.denonify(ventilation_dto.external_id, ''),
                'source': {'type': ventilation_dto.source.type,
                           'name': ventilation_dto.source.name},
                'name': Toolbox.denonify(ventilation_dto.name, ''),
                'amount_of_levels': Toolbox.denonify(ventilation_dto.amount_of_levels, 0),
                'device': {'vendor': Toolbox.denonify(ventilation_dto.device_vendor, ''),
                           'type': Toolbox.denonify(ventilation_dto.device_type, '')}}
        if ventilation_dto.device_serial:
            data['device'].update({'serial': Toolbox.denonify(ventilation_dto.device_serial, '')})
        return SerializerToolbox.filter_fields(data, fields)

    @staticmethod
    def deserialize(api_data):  # type: (Dict[str,Any]) -> VentilationDTO
        """Build a VentilationDTO from API data; absent keys are left unset."""
        ventilation_id = None  # type: Optional[int]
        if 'id' in api_data:
            ventilation_id = api_data['id']
        source_dto = None  # type: Optional[VentilationSourceDTO]
        if 'source' in api_data:
            source_dto = VentilationSourceDTO(None,
                                              name=api_data['source']['name'],
                                              type=api_data['source']['type'])
        ventilation_dto = VentilationDTO(id=ventilation_id, source=source_dto)
        if 'external_id' in api_data:
            ventilation_dto.external_id = Toolbox.nonify(api_data['external_id'], '')
        if 'name' in api_data:
            ventilation_dto.name = Toolbox.nonify(api_data['name'], '')
        if 'amount_of_levels' in api_data:
            # BUGFIX: null marker must be 0 (not ''), mirroring serialize()'s
            # denonify(amount_of_levels, 0) — an int can never equal ''.
            ventilation_dto.amount_of_levels = Toolbox.nonify(api_data['amount_of_levels'], 0)
        if 'device' in api_data:
            if 'type' in api_data['device']:
                ventilation_dto.device_type = Toolbox.nonify(api_data['device']['type'], '')
            if 'vendor' in api_data['device']:
                ventilation_dto.device_vendor = Toolbox.nonify(api_data['device']['vendor'], '')
            if 'serial' in api_data['device']:
                ventilation_dto.device_serial = Toolbox.nonify(api_data['device']['serial'], '')
        return ventilation_dto
class VentilationStatusSerializer(object):
    """(De)serializer between VentilationStatusDTO and its API dict form."""

    @staticmethod
    def serialize(status_dto, fields):
        # type: (VentilationStatusDTO, Optional[List[str]]) -> Dict[str,Any]
        """Serialize *status_dto* to an API dict, filtered to *fields*."""
        data = {'id': status_dto.id,
                'mode': status_dto.mode,
                'level': status_dto.level,
                'remaining_time': status_dto.remaining_time,
                'timer': status_dto.timer,
                'connected': status_dto.is_connected}
        return SerializerToolbox.filter_fields(data, fields)

    @staticmethod
    def deserialize(api_data):  # type: (Dict[str,Any]) -> VentilationStatusDTO
        """Build a VentilationStatusDTO; optional numeric fields are nonified."""
        status_dto = VentilationStatusDTO(id=api_data['id'],
                                          mode=api_data['mode'])
        for field in ('level', 'timer', 'remaining_time'):
            if field in api_data:
                setattr(status_dto, field, Toolbox.nonify(api_data[field], 0))
        return status_dto
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# Copyright 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# htt... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# Copyright 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License a... | ```python
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from magnum.common import exception
from magnum.common import utils
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_FACADE = None
def _create_facade_lazily():
    """Create (once) and return the module-wide oslo.db engine facade."""
    global _FACADE
    if _FACADE is None:
        # Built on first use so that config options are fully registered.
        _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the lazily-created facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a new session from the facade, forwarding *kwargs*."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """The backend is this module itself."""
    # Each call returns a fresh, stateless Connection.
    return Connection()
def model_query(model, *args, **kwargs):
    """Start a query for *model*.

    :param session: optional session to query on; a new session is
        created when omitted.
    """
    session = kwargs.get('session') or get_session()
    return session.query(model, *args)
def add_identity_filter(query, value):
    """Filter *query* by identity.

    An integer-like *value* filters on ``id``; a uuid-like value
    filters on ``uuid``.

    :raises: InvalidIdentity when *value* is neither.
    """
    if utils.is_int_like(value):
        return query.filter_by(id=value)
    if utils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    """Apply sorting and pagination to *query* and return the result rows.

    'id' is always appended as a tie-breaker sort key. Raises
    InvalidParameterValue when *sort_key* is not a sortable field.
    """
    if not query:
        query = model_query(model)
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        query = db_utils.paginate_query(query, model, limit, sort_keys,
                                        marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return query.all()
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
    # No connection state is kept; sessions are created per call.
    pass
def _add_tenant_filters(self, context, query):
    """Scope *query* to the request context's project (or user).

    Admin contexts requesting all tenants are left unfiltered.
    """
    if context.is_admin and context.all_tenants:
        return query
    if context.project_id:
        query = query.filter_by(project_id=context.project_id)
    else:
        query = query.filter_by(user_id=context.user_id)
    return query
def _add_bays_filters(self, query, filters):
    """Apply bay list filters to *query*.

    All filters are exact matches except 'status', which matches any
    value in the supplied list.
    """
    if filters is None:
        filters = []
    exact_keys = ('baymodel_id', 'name', 'node_count', 'master_count',
                  'stack_id', 'api_address', 'node_addresses',
                  'project_id', 'user_id')
    for key in exact_keys:
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    if 'status' in filters:
        query = query.filter(models.Bay.status.in_(filters['status']))
    return query
def get_bay_list(self, context, filters=None, limit=None, marker=None,
                 sort_key=None, sort_dir=None):
    """Return bays visible to the tenant, filtered and paginated."""
    query = model_query(models.Bay)
    query = self._add_tenant_filters(context, query)
    query = self._add_bays_filters(query, filters)
    return _paginate_query(models.Bay, limit, marker,
                           sort_key, sort_dir, query)
def create_bay(self, values):
    """Create a bay; a uuid is generated when absent.

    Raises BayAlreadyExists on a duplicate uuid.
    """
    # ensure defaults are present for new bays
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    bay = models.Bay()
    bay.update(values)
    try:
        bay.save()
    except db_exc.DBDuplicateEntry:
        raise exception.BayAlreadyExists(uuid=values['uuid'])
    return bay
def get_bay_by_id(self, context, bay_id):
    """Return the tenant-scoped bay with numeric id *bay_id*."""
    query = model_query(models.Bay)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=bay_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.BayNotFound(bay=bay_id)
def get_bay_by_name(self, context, bay_name):
    """Return the tenant-scoped bay named *bay_name*.

    Raises Conflict when several bays share the name.
    """
    query = model_query(models.Bay)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=bay_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple bays exist with same name.'
                                 ' Please use the bay uuid instead.')
    except NoResultFound:
        raise exception.BayNotFound(bay=bay_name)
def get_bay_by_uuid(self, context, bay_uuid):
    """Return the tenant-scoped bay with uuid *bay_uuid*."""
    query = model_query(models.Bay)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=bay_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.BayNotFound(bay=bay_uuid)
def destroy_bay(self, bay_id):
    """Delete a bay and its dependent pods, services, RCs and containers."""
    def destroy_bay_resources(session, bay_uuid):
        """Delete every resource belonging to the bay (if any exist)."""
        query = model_query(models.Pod, session=session)
        query = self._add_pods_filters(query, {'bay_uuid': bay_uuid})
        if query.count() != 0:
            query.delete()
        query = model_query(models.Service, session=session)
        query = self._add_services_filters(query, {'bay_uuid': bay_uuid})
        if query.count() != 0:
            query.delete()
        query = model_query(models.ReplicationController, session=session)
        query = self._add_rcs_filters(query, {'bay_uuid': bay_uuid})
        if query.count() != 0:
            query.delete()
        query = model_query(models.Container, session=session)
        query = self._add_containers_filters(query, {'bay_uuid': bay_uuid})
        if query.count() != 0:
            query.delete()

    session = get_session()
    with session.begin():
        query = model_query(models.Bay, session=session)
        query = add_identity_filter(query, bay_id)
        try:
            bay_ref = query.one()
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_id)
        # Cascade-delete dependents in the same transaction.
        destroy_bay_resources(session, bay_ref['uuid'])
        query.delete()
def update_bay(self, bay_id, values):
    """Update a bay; changing its UUID is rejected."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Bay.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_bay(bay_id, values)
def _do_update_bay(self, bay_id, values):
    """Apply *values* to a bay under a row lock; return the updated row."""
    session = get_session()
    with session.begin():
        query = model_query(models.Bay, session=session)
        query = add_identity_filter(query, bay_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.BayNotFound(bay=bay_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def create_bay_lock(self, bay_uuid, conductor_id):
    """Try to take the lock on a bay.

    Returns None on success, or the holding conductor's id when the
    bay is already locked.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.BayLock, session=session)
        lock = query.filter_by(bay_uuid=bay_uuid).first()
        if lock is not None:
            return lock.conductor_id
        session.add(models.BayLock(bay_uuid=bay_uuid,
                                   conductor_id=conductor_id))
def steal_bay_lock(self, bay_uuid, old_conductor_id, new_conductor_id):
    """Transfer a bay lock from *old_conductor_id* to *new_conductor_id*.

    Returns None on success, True when no lock exists, or the actual
    holder's id when the lock belongs to another conductor.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.BayLock, session=session)
        lock = query.filter_by(bay_uuid=bay_uuid).first()
        if lock is None:
            return True
        elif lock.conductor_id != old_conductor_id:
            return lock.conductor_id
        else:
            lock.update({'conductor_id': new_conductor_id})
def release_bay_lock(self, bay_uuid, conductor_id):
    """Release the lock held by *conductor_id* on a bay.

    Returns None on success, or True when no matching lock row existed.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.BayLock, session=session)
        query = query.filter_by(bay_uuid=bay_uuid,
                                conductor_id=conductor_id)
        count = query.delete()
        if count == 0:
            return True
def _add_baymodels_filters(self, query, filters):
    """Apply exact-match baymodel list filters to *query*."""
    if filters is None:
        filters = []
    for key in ('name', 'image_id', 'flavor_id', 'master_flavor_id',
                'keypair_id', 'external_network_id', 'dns_nameserver',
                'project_id', 'user_id', 'labels'):
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    return query
def get_baymodel_list(self, context, filters=None, limit=None, marker=None,
                      sort_key=None, sort_dir=None):
    """Return baymodels visible to the tenant, filtered and paginated."""
    query = model_query(models.BayModel)
    query = self._add_tenant_filters(context, query)
    query = self._add_baymodels_filters(query, filters)
    return _paginate_query(models.BayModel, limit, marker,
                           sort_key, sort_dir, query)
def create_baymodel(self, values):
    """Create a baymodel; a uuid is generated when absent.

    Raises BayModelAlreadyExists on a duplicate uuid.
    """
    # ensure defaults are present for new baymodels
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    baymodel = models.BayModel()
    baymodel.update(values)
    try:
        baymodel.save()
    except db_exc.DBDuplicateEntry:
        raise exception.BayModelAlreadyExists(uuid=values['uuid'])
    return baymodel
def get_baymodel_by_id(self, context, baymodel_id):
    """Return the tenant-scoped baymodel with numeric id *baymodel_id*."""
    query = model_query(models.BayModel)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=baymodel_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.BayModelNotFound(baymodel=baymodel_id)
def get_baymodel_by_uuid(self, context, baymodel_uuid):
    """Return the tenant-scoped baymodel with uuid *baymodel_uuid*."""
    query = model_query(models.BayModel)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=baymodel_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.BayModelNotFound(baymodel=baymodel_uuid)
def get_baymodel_by_name(self, context, baymodel_name):
    """Return the tenant-scoped baymodel named *baymodel_name*.

    Raises Conflict when several baymodels share the name.
    """
    query = model_query(models.BayModel)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=baymodel_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple baymodels exist with same name.'
                                 ' Please use the baymodel uuid instead.')
    except NoResultFound:
        raise exception.BayModelNotFound(baymodel=baymodel_name)
def destroy_baymodel(self, baymodel_id):
    """Delete a baymodel unless any bay still references it."""
    def is_baymodel_referenced(session, baymodel_uuid):
        """Checks whether the baymodel is referenced by bay(s)."""
        query = model_query(models.Bay, session=session)
        query = self._add_bays_filters(query,
                                       {'baymodel_id': baymodel_uuid})
        return query.count() != 0

    session = get_session()
    with session.begin():
        query = model_query(models.BayModel, session=session)
        query = add_identity_filter(query, baymodel_id)
        try:
            baymodel_ref = query.one()
        except NoResultFound:
            raise exception.BayModelNotFound(baymodel=baymodel_id)
        if is_baymodel_referenced(session, baymodel_ref['uuid']):
            raise exception.BayModelReferenced(baymodel=baymodel_id)
        query.delete()
def update_baymodel(self, baymodel_id, values):
    """Update a baymodel; changing its UUID is rejected."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing BayModel.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_baymodel(baymodel_id, values)
def _do_update_baymodel(self, baymodel_id, values):
    """Apply *values* to a baymodel under a row lock; return the row."""
    session = get_session()
    with session.begin():
        query = model_query(models.BayModel, session=session)
        query = add_identity_filter(query, baymodel_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.BayModelNotFound(baymodel=baymodel_id)
        ref.update(values)
    return ref
def _add_containers_filters(self, query, filters):
    """Apply exact-match container list filters to *query*."""
    if filters is None:
        filters = []
    for key in ('name', 'image', 'project_id', 'user_id'):
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    return query
def get_container_list(self, context, filters=None, limit=None,
                       marker=None, sort_key=None, sort_dir=None):
    """Return containers visible to the tenant, filtered and paginated."""
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = self._add_containers_filters(query, filters)
    return _paginate_query(models.Container, limit, marker,
                           sort_key, sort_dir, query)
def create_container(self, values):
    """Create a container; a uuid is generated when absent.

    Raises ContainerAlreadyExists on a duplicate uuid.
    """
    # ensure defaults are present for new containers
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    container = models.Container()
    container.update(values)
    try:
        container.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ContainerAlreadyExists(uuid=values['uuid'])
    return container
def get_container_by_id(self, context, container_id):
    """Return the tenant-scoped container with numeric id *container_id*."""
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=container_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_id)
def get_container_by_uuid(self, context, container_uuid):
    """Return the tenant-scoped container with uuid *container_uuid*."""
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=container_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_uuid)
def get_container_by_name(self, context, container_name):
    """Return the tenant-scoped container named *container_name*.

    Raises Conflict when several containers share the name.
    """
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=container_name)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_name)
    except MultipleResultsFound:
        raise exception.Conflict('Multiple containers exist with same '
                                 'name. Please use the container uuid '
                                 'instead.')
def destroy_container(self, container_id):
    """Delete a container by id or uuid; raises ContainerNotFound."""
    session = get_session()
    with session.begin():
        query = model_query(models.Container, session=session)
        query = add_identity_filter(query, container_id)
        count = query.delete()
        if count != 1:
            # Pass the id as a keyword argument so the exception's message
            # template is formatted, matching every other ContainerNotFound
            # raise in this module (a positional arg becomes the message).
            raise exception.ContainerNotFound(container=container_id)
def update_container(self, container_id, values):
    """Update a container; changing its UUID is rejected."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Container.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_container(container_id, values)
def _do_update_container(self, container_id, values):
    """Apply *values* to a container under a row lock; return the row."""
    session = get_session()
    with session.begin():
        query = model_query(models.Container, session=session)
        query = add_identity_filter(query, container_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.ContainerNotFound(container=container_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_nodes_filters(self, query, filters):
    """Apply node list filters to *query*.

    'associated' filters on whether an ironic node id is set; the
    remaining filters are exact matches.
    """
    if filters is None:
        filters = []
    if 'associated' in filters:
        # Use isnot()/is_() rather than `!= None` / `== None`: they emit
        # the same IS NOT NULL / IS NULL SQL without tripping E711 linters.
        if filters['associated']:
            query = query.filter(models.Node.ironic_node_id.isnot(None))
        else:
            query = query.filter(models.Node.ironic_node_id.is_(None))
    for key in ('type', 'image_id', 'project_id', 'user_id'):
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    return query
def get_node_list(self, context, filters=None, limit=None, marker=None,
                  sort_key=None, sort_dir=None):
    """Return nodes visible to the tenant, filtered and paginated."""
    query = model_query(models.Node)
    query = self._add_tenant_filters(context, query)
    query = self._add_nodes_filters(query, filters)
    return _paginate_query(models.Node, limit, marker,
                           sort_key, sort_dir, query)
def create_node(self, values):
    """Create a node; a uuid is generated when absent.

    A duplicate ironic_node_id maps to InstanceAssociated; any other
    duplicate maps to NodeAlreadyExists.
    """
    # ensure defaults are present for new nodes
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    node = models.Node()
    node.update(values)
    try:
        node.save()
    except db_exc.DBDuplicateEntry as exc:
        if 'ironic_node_id' in exc.columns:
            raise exception.InstanceAssociated(
                instance_uuid=values['ironic_node_id'],
                node=values['uuid'])
        raise exception.NodeAlreadyExists(uuid=values['uuid'])
    return node
def get_node_by_id(self, context, node_id):
    """Return the tenant-scoped node with numeric id *node_id*."""
    query = model_query(models.Node)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=node_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, context, node_uuid):
    """Return the tenant-scoped node with uuid *node_uuid*."""
    query = model_query(models.Node)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=node_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.NodeNotFound(node=node_uuid)
def destroy_node(self, node_id):
    """Delete a node by id or uuid; raises NodeNotFound."""
    session = get_session()
    with session.begin():
        query = model_query(models.Node, session=session)
        query = add_identity_filter(query, node_id)
        count = query.delete()
        if count != 1:
            # Keyword argument so the exception's message template is
            # formatted, matching the other NodeNotFound raises here.
            raise exception.NodeNotFound(node=node_id)
def update_node(self, node_id, values):
    """Update a node; changing its UUID is rejected.

    A duplicate ironic_node_id maps to InstanceAssociated.
    """
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Node.")
        raise exception.InvalidParameterValue(err=msg)
    try:
        return self._do_update_node(node_id, values)
    except db_exc.DBDuplicateEntry:
        raise exception.InstanceAssociated(
            instance_uuid=values['ironic_node_id'],
            node=node_id)
def _do_update_node(self, node_id, values):
    """Apply *values* to a node under a row lock; return the updated row.

    Re-associating an already associated node raises NodeAssociated.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Node, session=session)
        query = add_identity_filter(query, node_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.NodeNotFound(node=node_id)
        # Prevent ironic_node_id overwriting
        if values.get("ironic_node_id") and ref.ironic_node_id:
            raise exception.NodeAssociated(
                node=node_id,
                instance=ref.ironic_node_id)
        ref.update(values)
    return ref
def _add_pods_filters(self, query, filters):
    """Apply exact-match pod list filters to *query*."""
    if filters is None:
        filters = []
    for key in ('bay_uuid', 'name', 'status'):
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    return query
def get_pod_list(self, context, filters=None, limit=None, marker=None,
                 sort_key=None, sort_dir=None):
    """Return pods visible to the tenant, filtered and paginated."""
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = self._add_pods_filters(query, filters)
    return _paginate_query(models.Pod, limit, marker,
                           sort_key, sort_dir, query)
def create_pod(self, values):
    """Create a pod; a uuid is generated when absent.

    Raises PodAlreadyExists on a duplicate uuid.
    """
    # ensure defaults are present for new pods
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    pod = models.Pod()
    pod.update(values)
    try:
        pod.save()
    except db_exc.DBDuplicateEntry:
        raise exception.PodAlreadyExists(uuid=values['uuid'])
    return pod
def get_pod_by_id(self, context, pod_id):
    """Return the tenant-scoped pod with numeric id *pod_id*."""
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=pod_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_id)
def get_pod_by_uuid(self, context, pod_uuid):
    """Return the tenant-scoped pod with uuid *pod_uuid*."""
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=pod_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_uuid)
def get_pod_by_name(self, pod_name):
    """Return the pod named *pod_name*.

    NOTE(review): unlike the other getters this takes no context and
    applies no tenant filtering — confirm that is intentional.
    """
    query = model_query(models.Pod).filter_by(name=pod_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple pods exist with same name.'
                                 ' Please use the pod uuid instead.')
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_name)
def destroy_pod(self, pod_id):
    """Delete a pod by id or uuid; raises PodNotFound."""
    session = get_session()
    with session.begin():
        query = model_query(models.Pod, session=session)
        query = add_identity_filter(query, pod_id)
        count = query.delete()
        if count != 1:
            # Keyword argument so the exception's message template is
            # formatted, matching the other PodNotFound raises here.
            raise exception.PodNotFound(pod=pod_id)
def update_pod(self, pod_id, values):
    """Update a pod; changing its UUID is rejected."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Pod.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_pod(pod_id, values)
def _do_update_pod(self, pod_id, values):
    """Apply *values* to a pod under a row lock; return the updated row."""
    session = get_session()
    with session.begin():
        query = model_query(models.Pod, session=session)
        query = add_identity_filter(query, pod_id)
        try:
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.PodNotFound(pod=pod_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_services_filters(self, query, filters):
    """Apply exact-match service list filters to *query*."""
    if filters is None:
        filters = []
    for key in ('bay_uuid', 'name', 'ip', 'ports'):
        if key in filters:
            query = query.filter_by(**{key: filters[key]})
    return query
def get_service_list(self, context, filters=None, limit=None, marker=None,
                     sort_key=None, sort_dir=None):
    """Return services visible to the tenant, filtered and paginated."""
    query = model_query(models.Service)
    query = self._add_tenant_filters(context, query)
    query = self._add_services_filters(query, filters)
    return _paginate_query(models.Service, limit, marker,
                           sort_key, sort_dir, query)
def create_service(self, values):
    """Create a service; a uuid is generated when absent.

    Raises ServiceAlreadyExists on a duplicate uuid.
    """
    # ensure defaults are present for new services
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    service = models.Service()
    service.update(values)
    try:
        service.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ServiceAlreadyExists(uuid=values['uuid'])
    return service
def get_service_by_id(self, context, service_id):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=service_id)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
def get_service_by_uuid(self, context, service_uuid):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=service_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_uuid)
def get_services_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.Service).filter_by(bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ServiceNotFound(bay=bay_uuid)
def get_service_by_name(self, context, service_name):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=service_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple services exist with same name.'
' Please use the service uuid instead.')
except NoResultFound:
raise exception.ServiceNotFound(service=service_name)
def destroy_service(self, service_id):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
count = query.delete()
if count != 1:
raise exception.ServiceNotFound(service_id)
def update_service(self, service_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Service.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_service(service_id, values)
def _do_update_service(self, service_id, values):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_rcs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'replicas' in filters:
query = query.filter_by(replicas=filters['replicas'])
return query
def get_rc_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = self._add_rcs_filters(query, filters)
return _paginate_query(models.ReplicationController, limit, marker,
sort_key, sort_dir, query)
def create_rc(self, values):
# ensure defaults are present for new ReplicationController
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
rc = models.ReplicationController()
rc.update(values)
try:
rc.save()
except db_exc.DBDuplicateEntry:
raise exception.ReplicationControllerAlreadyExists(
uuid=values['uuid'])
return rc
def get_rc_by_id(self, context, rc_id):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=rc_id)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
def get_rc_by_uuid(self, context, rc_uuid):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=rc_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_uuid)
def get_rcs_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.ReplicationController).filter_by(
bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ReplicationControllerNotFound(bay=bay_uuid)
def get_rc_by_name(self, context, rc_name):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=rc_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple rcs exist with same name.'
' Please use the rc uuid instead.')
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_name)
def destroy_rc(self, rc_id):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
count = query.delete()
if count != 1:
raise exception.ReplicationControllerNotFound(rc_id)
def update_rc(self, rc_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing rc.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_rc(rc_id, values)
def _do_update_rc(self, rc_id, values):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
ref.update(values)
return ref
def create_x509keypair(self, values):
# ensure defaults are present for new x509keypairs
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
x509keypair = models.X509KeyPair()
x509keypair.update(values)
try:
x509keypair.save()
except db_exc.DBDuplicateEntry:
raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=x509keypair_id)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_name(self, context, x509keypair_name):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=x509keypair_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple x509keypairs exist with '
'same name. Please use the x509keypair '
'uuid instead.')
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_name)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=x509keypair_uuid)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
count = query.delete()
if count != 1:
raise exception.X509KeyPairNotFound(x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = self._add_x509keypairs_filters(query, filters)
return _paginate_query(models.X509KeyPair, limit, marker,
sort_key, sort_dir, query)
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# htt... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License ... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3",
manifest={
"EntityType",
"ListEntityTypesRequest",
"ListEntityTypesResponse",
"GetEntityTypeRequest",
"CreateEntityTypeRequest",
"UpdateEntityTypeRequest",
"DeleteEntityTypeRequest",
},
)
class EntityType(proto.Message):
r"""Entities are extracted from user input and represent parameters that
are meaningful to your application. For example, a date range, a
proper name such as a geographic location or landmark, and so on.
Entities represent actionable data for your application.
When you define an entity, you can also include synonyms that all
map to that entity. For example, "soft drink", "soda", "pop", and so
on.
There are three types of entities:
- **System** - entities that are defined by the Dialogflow API for
common data types such as date, time, currency, and so on. A
system entity is represented by the ``EntityType`` type.
- **Custom** - entities that are defined by you that represent
actionable data that is meaningful to your application. For
example, you could define a ``pizza.sauce`` entity for red or
white pizza sauce, a ``pizza.cheese`` entity for the different
types of cheese on a pizza, a ``pizza.topping`` entity for
different toppings, and so on. A custom entity is represented by
the ``EntityType`` type.
- **User** - entities that are built for an individual user such as
favorites, preferences, playlists, and so on. A user entity is
represented by the
[SessionEntityType][google.cloud.dialogflow.cx.v3.SessionEntityType]
type.
For more information about entity types, see the `Dialogflow
documentation <https://cloud.google.com/dialogflow/docs/entities-overview>`__.
Attributes:
name (str):
The unique identifier of the entity type. Required for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
display_name (str):
Required. The human-readable name of the
entity type, unique within the agent.
kind (google.cloud.dialogflowcx_v3.types.EntityType.Kind):
Required. Indicates the kind of entity type.
auto_expansion_mode (google.cloud.dialogflowcx_v3.types.EntityType.AutoExpansionMode):
Indicates whether the entity type can be
automatically expanded.
entities (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.Entity]):
The collection of entity entries associated
with the entity type.
excluded_phrases (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.ExcludedPhrase]):
Collection of exceptional words and phrases that shouldn't
be matched. For example, if you have a size entity type with
entry ``giant``\ (an adjective), you might consider adding
``giants``\ (a noun) as an exclusion. If the kind of entity
type is ``KIND_MAP``, then the phrases specified by entities
and excluded phrases should be mutually exclusive.
enable_fuzzy_extraction (bool):
Enables fuzzy entity extraction during
classification.
redact (bool):
Indicates whether parameters of the entity
type should be redacted in log. If redaction is
enabled, page parameters and intent parameters
referring to the entity type will be replaced by
parameter name when logging.
"""
class Kind(proto.Enum):
r"""Represents kinds of entities."""
KIND_UNSPECIFIED = 0
KIND_MAP = 1
KIND_LIST = 2
KIND_REGEXP = 3
class AutoExpansionMode(proto.Enum):
r"""Represents different entity type expansion modes. Automated
expansion allows an agent to recognize values that have not been
explicitly listed in the entity (for example, new kinds of
shopping list items).
"""
AUTO_EXPANSION_MODE_UNSPECIFIED = 0
AUTO_EXPANSION_MODE_DEFAULT = 1
class Entity(proto.Message):
r"""An **entity entry** for an associated entity type.
Attributes:
value (str):
Required. The primary value associated with this entity
entry. For example, if the entity type is *vegetable*, the
value could be *scallions*.
For ``KIND_MAP`` entity types:
- A canonical value to be used in place of synonyms.
For ``KIND_LIST`` entity types:
- A string that can contain references to other entity
types (with or without aliases).
synonyms (Sequence[str]):
Required. A collection of value synonyms. For example, if
the entity type is *vegetable*, and ``value`` is
*scallions*, a synonym could be *green onions*.
For ``KIND_LIST`` entity types:
- This collection must contain exactly one synonym equal to
``value``.
"""
value = proto.Field(proto.STRING, number=1,)
synonyms = proto.RepeatedField(proto.STRING, number=2,)
class ExcludedPhrase(proto.Message):
r"""An excluded entity phrase that should not be matched.
Attributes:
value (str):
Required. The word or phrase to be excluded.
"""
value = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
kind = proto.Field(proto.ENUM, number=3, enum=Kind,)
auto_expansion_mode = proto.Field(proto.ENUM, number=4, enum=AutoExpansionMode,)
entities = proto.RepeatedField(proto.MESSAGE, number=5, message=Entity,)
excluded_phrases = proto.RepeatedField(
proto.MESSAGE, number=6, message=ExcludedPhrase,
)
enable_fuzzy_extraction = proto.Field(proto.BOOL, number=7,)
redact = proto.Field(proto.BOOL, number=9,)
class ListEntityTypesRequest(proto.Message):
r"""The request message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
parent (str):
Required. The agent to list all entity types for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
language_code (str):
The language to list entity types for. The following fields
are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListEntityTypesResponse(proto.Message):
r"""The response message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
entity_types (Sequence[google.cloud.dialogflowcx_v3.types.EntityType]):
The list of entity types. There will be a maximum number of
items returned based on the page_size field in the request.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
entity_types = proto.RepeatedField(proto.MESSAGE, number=1, message="EntityType",)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.GetEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.GetEntityType].
Attributes:
name (str):
Required. The name of the entity type. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
language_code (str):
The language to retrieve the entity type for. The following
fields are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
name = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
class CreateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.CreateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.CreateEntityType].
Attributes:
parent (str):
Required. The agent to create a entity type for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to create.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
parent = proto.Field(proto.STRING, number=1,)
entity_type = proto.Field(proto.MESSAGE, number=2, message="EntityType",)
language_code = proto.Field(proto.STRING, number=3,)
class UpdateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Attributes:
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to update.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get updated.
"""
entity_type = proto.Field(proto.MESSAGE, number=1, message="EntityType",)
language_code = proto.Field(proto.STRING, number=2,)
update_mask = proto.Field(
proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
)
class DeleteEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.DeleteEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.DeleteEntityType].
Attributes:
name (str):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
force (bool):
This field has no effect for entity type not being used. For
entity types that are used by intents or pages:
- If ``force`` is set to false, an error will be returned
with message indicating the referencing resources.
- If ``force`` is set to true, Dialogflow will remove the
entity type, as well as any references to the entity type
(i.e. Page
[parameter][google.cloud.dialogflow.cx.v3.Form.Parameter]
of the entity type will be changed to '@sys.any' and
intent
[parameter][google.cloud.dialogflow.cx.v3.Intent.Parameter]
of the entity type will be removed).
"""
name = proto.Field(proto.STRING, number=1,)
force = proto.Field(proto.BOOL, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python3\n# Copyright (c) 2015-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test the ZMQ API.\"\"\"\n\nfrom test_fram... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# Copyright (c) 2015-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test the ZMQ API.\"\"\"\n... | ```python
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
from test_framework.test_framework import TrollcoinTestFramework
from test_framework.util import *
import zmq
import struct
class ZMQTest (TrollcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
port = 29243
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
self.log.info("listen...")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
blockcount += 1
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
assert_equal(hashRPC, hashZMQ) #blockhash from generate must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest ().main ()
``` |
[
{
"content": "Here is the script:\n```python\nfrom Components.config import config, ConfigSubsection, ConfigSelection, ConfigPIN, ConfigYesNo, ConfigSubList, ConfigInteger\nfrom Components.ServiceList import refreshServiceList\nfrom Screens.InputBox import PinInput\nfrom Screens.MessageBox import MessageBox\nfr... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nfrom Components.config import config, ConfigSubsection, ConfigSelection, ConfigPIN, ConfigYesNo, ConfigSubList, ConfigInteger\nfrom Components.ServiceList import refreshServiceList\nfrom Screens.InputBox import PinInput\nfrom Screens.MessageBox impor... | ```python
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigPIN, ConfigYesNo, ConfigSubList, ConfigInteger
from Components.ServiceList import refreshServiceList
from Screens.InputBox import PinInput
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
from ServiceReference import ServiceReference
from Tools import Notifications
from Tools.Directories import resolveFilename, SCOPE_CONFIG
from Tools.Notifications import AddPopup
from enigma import eTimer, eServiceCenter, iServiceInformation, eServiceReference, eDVBDB
import time
import os
TYPE_SERVICE = "SERVICE"
TYPE_BOUQUETSERVICE = "BOUQUETSERVICE"
TYPE_BOUQUET = "BOUQUET"
LIST_BLACKLIST = "blacklist"
def InitParentalControl():
config.ParentalControl = ConfigSubsection()
config.ParentalControl.storeservicepin = ConfigSelection(default="never", choices=[("never", _("never")), ("5", _("%d minutes") % 5), ("30", _("%d minutes") % 30), ("60", _("%d minutes") % 60), ("standby", _("until standby/restart"))])
config.ParentalControl.configured = ConfigYesNo(default=False)
config.ParentalControl.setuppinactive = ConfigYesNo(default=False)
config.ParentalControl.retries = ConfigSubsection()
config.ParentalControl.retries.servicepin = ConfigSubsection()
config.ParentalControl.retries.servicepin.tries = ConfigInteger(default=3)
config.ParentalControl.retries.servicepin.time = ConfigInteger(default=3)
config.ParentalControl.servicepin = ConfigSubList()
config.ParentalControl.servicepin.append(ConfigPIN(default=0))
config.ParentalControl.age = ConfigSelection(default="18", choices=[("0", _("No age block"))] + list((str(x), "%d+" % x) for x in range(3, 19)))
config.ParentalControl.hideBlacklist = ConfigYesNo(default=False)
config.ParentalControl.config_sections = ConfigSubsection()
config.ParentalControl.config_sections.main_menu = ConfigYesNo(default=False)
config.ParentalControl.config_sections.configuration = ConfigYesNo(default=False)
config.ParentalControl.config_sections.timer_menu = ConfigYesNo(default=False)
config.ParentalControl.config_sections.plugin_browser = ConfigYesNo(default=False)
config.ParentalControl.config_sections.standby_menu = ConfigYesNo(default=False)
config.ParentalControl.config_sections.software_update = ConfigYesNo(default=False)
config.ParentalControl.config_sections.manufacturer_reset = ConfigYesNo(default=True)
config.ParentalControl.config_sections.movie_list = ConfigYesNo(default=False)
config.ParentalControl.config_sections.context_menus = ConfigYesNo(default=False)
config.ParentalControl.config_sections.vixmenu = ConfigYesNo(default=False)
#Added for backwards compatibility with some 3rd party plugins that depend on this config
config.ParentalControl.servicepinactive = config.ParentalControl.configured
config.ParentalControl.setuppin = config.ParentalControl.servicepin[0]
config.ParentalControl.retries.setuppin = config.ParentalControl.retries.servicepin
config.ParentalControl.type = ConfigSelection(default="blacklist", choices=[(LIST_BLACKLIST, _("blacklist"))])
global parentalControl
parentalControl = ParentalControl()
class ParentalControl:
def __init__(self):
#Do not call open on init, because bouquets are not ready at that moment
self.filesOpened = False
self.PinDlg = None
#This is the timer that is used to see, if the time for caching the pin is over
#Of course we could also work without a timer and compare the times every
#time we call isServicePlayable. But this might probably slow down zapping,
#That's why I decided to use a timer
self.sessionPinTimer = eTimer()
self.sessionPinTimer.callback.append(self.resetSessionPin)
self.getConfigValues()
def serviceMethodWrapper(self, service, method, *args):
#This method is used to call all functions that need a service as Parameter:
#It takes either a Service- Reference or a Bouquet- Reference and passes
#Either the service or all services contained in the bouquet to the method given
#That way all other functions do not need to distinguish between service and bouquet.
if "FROM BOUQUET" in service:
method(service, TYPE_BOUQUET, *args)
servicelist = self.readServicesFromBouquet(service, "C")
for ref in servicelist:
sRef = str(ref[0])
method(sRef, TYPE_BOUQUETSERVICE, *args)
else:
ref = ServiceReference(service)
sRef = str(ref)
method(sRef, TYPE_SERVICE, *args)
def isProtected(self, ref):
if not config.ParentalControl.servicepinactive.value or not ref:
return False
#Check if configuration has already been read or if the significant values have changed.
#If true: read the configuration
if self.storeServicePin != config.ParentalControl.storeservicepin.value:
self.getConfigValues()
service = ref.toCompareString()
path = ref.getPath()
info = eServiceCenter.getInstance().info(ref)
age = 0
if path.startswith("/"):
if service.startswith("1:"):
refstr = info and info.getInfoString(ref, iServiceInformation.sServiceref)
service = refstr and eServiceReference(refstr).toCompareString()
if [x for x in path[1:].split("/") if x.startswith(".") and not x == ".Trash"]:
age = 18
elif int(config.ParentalControl.age.value):
event = info and info.getEvent(ref)
rating = event and event.getParentalData()
age = rating and rating.getRating()
age = age and age <= 15 and age + 3 or 0
return (age and age >= int(config.ParentalControl.age.value)) or service and service in self.blacklist
def isServicePlayable(self, ref, callback, session=None):
self.session = session
if self.isProtected(ref):
#Check if the session pin is cached
if self.sessionPinCached:
return True
self.callback = callback
service = ref.toCompareString()
title = 'FROM BOUQUET "userbouquet.' in service and _("this bouquet is protected by a parental control pin") or _("this service is protected by a parental control pin")
if session:
Notifications.RemovePopup("Parental control")
if self.PinDlg:
self.PinDlg.close()
self.PinDlg = session.openWithCallback(boundFunction(self.servicePinEntered, ref), PinInput, triesEntry=config.ParentalControl.retries.servicepin, pinList=self.getPinList(), service=ServiceReference(ref).getServiceName(), title=title, windowTitle=_("Parental control"), simple=False)
else:
Notifications.AddNotificationParentalControl(boundFunction(self.servicePinEntered, ref), PinInput, triesEntry=config.ParentalControl.retries.servicepin, pinList=self.getPinList(), service=ServiceReference(ref).getServiceName(), title=title, windowTitle=_("Parental control"))
return False
else:
return True
def protectService(self, service):
if service not in self.blacklist:
self.serviceMethodWrapper(service, self.addServiceToList, self.blacklist)
if config.ParentalControl.hideBlacklist.value and not self.sessionPinCached:
eDVBDB.getInstance().addFlag(eServiceReference(service), 2)
def unProtectService(self, service):
if service in self.blacklist:
self.serviceMethodWrapper(service, self.removeServiceFromList, self.blacklist)
def getProtectionLevel(self, service):
return service not in self.blacklist and -1 or 0
def isServiceProtectionBouquet(self, service):
return service in self.blacklist and TYPE_BOUQUETSERVICE in self.blacklist[service]
	def getConfigValues(self):
		"""Read the PIN-caching configuration into instance flags."""
		#Read all values from configuration
		self.checkPinInterval = False
		self.checkPinIntervalCancel = False
		self.checkSessionPin = False
		self.sessionPinCached = False
		self.pinIntervalSeconds = 0
		self.pinIntervalSecondsCancel = 0
		self.storeServicePin = config.ParentalControl.storeservicepin.value
		if self.storeServicePin == "never":
			pass
		elif self.storeServicePin == "standby":
			self.checkSessionPin = True
		else:
			# Any other value is a number of minutes to keep the PIN cached.
			self.checkPinInterval = True
			iMinutes = float(self.storeServicePin)
			iSeconds = int(iMinutes * 60)
			self.pinIntervalSeconds = iSeconds
	def standbyCounterCallback(self, configElement):
		"""Config notifier: invalidate the cached PIN when entering standby."""
		self.resetSessionPin()
	def resetSessionPin(self):
		"""Forget the cached session PIN and re-apply blacklist hiding."""
		#Reset the session pin, stop the timer
		self.sessionPinCached = False
		self.hideBlacklist()
	def getCurrentTimeStamp(self):
		"""Return the current time as a Unix timestamp (float seconds)."""
		return time.time()
def getPinList(self):
return [x.value for x in config.ParentalControl.servicepin]
def setSessionPinCached(self):
if self.checkSessionPin == True:
self.sessionPinCached = True
if self.checkPinInterval == True:
self.sessionPinCached = True
self.sessionPinTimer.startLongTimer(self.pinIntervalSeconds)
	def servicePinEntered(self, service, result=None):
		"""PIN dialog callback.

		*result* is True on a correct PIN (cache it, unhide services,
		continue playback via the stored callback), False on a wrong
		PIN (show an error) and None on cancel (do nothing).
		"""
		if result:
			self.setSessionPinCached()
			self.hideBlacklist()
			self.callback(ref=service)
		elif result == False:
			messageText = _("The pin code you entered is wrong.")
			if self.session:
				self.session.open(MessageBox, messageText, MessageBox.TYPE_INFO, timeout=3)
			else:
				AddPopup(messageText, MessageBox.TYPE_ERROR, timeout=3)
def saveListToFile(self, sWhichList, vList):
#Replaces saveWhiteList and saveBlackList:
#I don't like to have two functions with identical code...
file = open(resolveFilename(SCOPE_CONFIG, sWhichList), 'w')
for sService, sType in vList.iteritems():
#Only Services that are selected directly and Bouqets are saved.
#Services that are added by a bouquet are not saved.
#This is the reason for the change in self.whitelist and self.blacklist
if TYPE_SERVICE in sType or TYPE_BOUQUET in sType:
file.write(str(sService) + "\n")
file.close()
def openListFromFile(self, sWhichList):
#Replaces openWhiteList and openBlackList:
#I don't like to have two functions with identical code...
result = {}
try:
file = open(resolveFilename(SCOPE_CONFIG, sWhichList), 'r')
for x in file:
sPlain = x.strip()
self.serviceMethodWrapper(sPlain, self.addServiceToList, result)
file.close()
except:
pass
return result
def addServiceToList(self, service, type, vList):
#Replaces addWhitelistService and addBlacklistService
#The lists are not only lists of service references any more.
#They are named lists with the service as key and an array of types as value:
if service in vList:
if not type in vList[service]:
vList[service].append(type)
else:
vList[service] = [type]
def removeServiceFromList(self, service, type, vList):
#Replaces deleteWhitelistService and deleteBlacklistService
if service in vList:
if type in vList[service]:
vList[service].remove(type)
if not vList[service]:
del vList[service]
	def readServicesFromBouquet(self, sBouquetSelection, formatstring):
		"""Return (comparestring, name) tuples for the given bouquet.

		Returns None when the bouquet cannot be listed.
		NOTE(review): *formatstring* is unused — the content format is
		hard-coded to "CN"; kept for interface compatibility.
		"""
		#This method gives back a list of services for a given bouquet
		from enigma import eServiceCenter, eServiceReference
		serviceHandler = eServiceCenter.getInstance()
		refstr = sBouquetSelection
		root = eServiceReference(refstr)
		list = serviceHandler.list(root)
		if list is not None:
			services = list.getContent("CN", True) #(servicecomparestring, name)
			return services
	def save(self):
		"""Persist the blacklist to its config file."""
		self.saveListToFile(LIST_BLACKLIST, self.blacklist)
	def open(self):
		"""Load the blacklist from disk and apply the hide flags.

		On first use, also register the standby notifier that resets
		the cached PIN.
		"""
		self.blacklist = self.openListFromFile(LIST_BLACKLIST)
		self.hideBlacklist()
		if not self.filesOpened:
			# Reset PIN cache on standby: Use StandbyCounter- Config- Callback
			config.misc.standbyCounter.addNotifier(self.standbyCounterCallback, initial_call=False)
			self.filesOpened = True
	def __getattr__(self, name):
		"""Lazily load the list files on first access to the list attrs."""
		# This method is called if we lack a property. I'm lazy, so
		# I load the files when someone 'hits' this code
		if name in ('blacklist', 'whitelist'):
			if not self.filesOpened:
				self.open()
			return getattr(self, name)
		raise AttributeError, name
	def hideBlacklist(self):
		"""Add or remove the "hidden" flag (2) on blacklisted services.

		Services (not bouquets) are hidden only while protection is
		active, PIN caching is enabled, hiding is configured and no PIN
		is currently cached; otherwise the flag is removed again.
		"""
		if self.blacklist:
			if config.ParentalControl.servicepinactive.value and config.ParentalControl.storeservicepin.value != "never" and config.ParentalControl.hideBlacklist.value and not self.sessionPinCached:
				for ref in self.blacklist:
					if TYPE_BOUQUET not in ref:
						eDVBDB.getInstance().addFlag(eServiceReference(ref), 2)
			else:
				for ref in self.blacklist:
					if TYPE_BOUQUET not in ref:
						eDVBDB.getInstance().removeFlag(eServiceReference(ref), 2)
			refreshServiceList()
``` |
[
{
"content": "```python\n'''\nHandle connections from a client greeter. Provide information such as\nsub-servers and news.\n'''\nimport asyncore\nimport socket\nimport struct\n\n\nclass Handler(asyncore.dispatcher_with_send):\n def handle_read(self):\n data = self.recv(1)\n if data == b'\\x00':... | [
{
"content": "<|memory_start|>```python\n'''\nHandle connections from a client greeter. Provide information such as\nsub-servers and news.\n'''\nimport asyncore\nimport socket\nimport struct\n\n\nclass Handler(asyncore.dispatcher_with_send):\n def handle_read(self):\n data = self.recv(1)\n if d... | ```python
'''
Handle connections from a client greeter. Provide information such as
sub-servers and news.
'''
import asyncore
import socket
import struct
class Handler(asyncore.dispatcher_with_send):
    """Answers one-byte opcodes from a connected greeter client."""
    def handle_read(self):
        # Protocol: 0x00 -> four 16-byte server names, 0x01 -> news text.
        data = self.recv(1)
        if data == b'\x00':
            # TODO: load alternate server names from the config
            self.send(struct.pack('16s16s16s16s', b'localhost', b'localhost',
                                  b'localhost', b'localhost'))
        elif data == b'\x01':
            self.send(b'Welcome to the new world')
class Server(asyncore.dispatcher):
    """Listening socket that spawns one Handler per greeter connection."""
    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
    def handle_accepted(self, sock, addr):
        # Python >= 3.2 path: the socket is already accepted.
        print('[*] Got a new connection')
        handler = Handler(sock)
    def handle_accept(self):
        # Required for Python 3.1
        print('[*] Got a new connection')
        pair = self.accept()
        if pair is not None:
            handler = Handler(pair[0])
``` |
[
{
"content": "Return the code unaltered:\n```python\n# Copyright (C) 2011 Groza Cristian\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# Copyright (C) 2011 Groza Cristian\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of... | ```python
# Copyright (C) 2011 Groza Cristian
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wx.lib.inspection
import sys, types
import collections
from Configuration import *
from Logger import *
from AboutWindow import *
from SyntaxHighlight import *
from ConfigWindow import *
from PrettyPrinter import *
from FindReplaceText import *
from AutoComplet import *
from StcTextCtrl import *
from Menu import *
from Toolbar import *
from gEcritPluginManager import *
from yapsy.PluginManager import PluginManager
from data.plugins.categories import *
from AuiNoteBook import *
from gEcritSession import *
import Exceptions
import wx.aui
import gettext
import logging
logging.basicConfig(level=logging.DEBUG)
class gEcrit(wx.Frame):
"""
Editor
This class is the entry point in the program.
It creates all the user interface and initializes
the required objects and classes.
The functions that cannot go into another objects
for diverse reasons go here.
"""
    def dummy_tr(self, tr):
        """Identity fallback used when no gettext translation is available."""
        return tr
def __init__(self, id, parent):
"""
__init__
Creates the user interface.
Initializez the terminals if enabled.
Creates the required GUI and non GUI objects.
"""
BOTTOMPANEL_ID = 4002
SIDEPANEL_ID = 3999
try:
self.presLang = gettext.translation("gEcrit", "./locale")
self._ = self.presLang.ugettext
self.presLang.install()
except:
print("Translation for local language not found.")
self._ = self.dummy_tr
pathname = os.path.abspath(os.path.dirname((sys.argv)[0])) # Finding where
os.chdir(pathname) # gEcrit is running
#Setting up the plugin envirenment
self.general_plugins = {}
self.passive_plugins = {}
self.plugin_manager = PluginManager(
categories_filter={"General": General,
"Passives" : Passive})
#Sets YAPSY the plugin directory.
self.plugin_path = os.path.join(pathname, "data", "plugins")
self.plugin_manager.setPluginPlaces([self.plugin_path])
self.plugin_manager.locatePlugins()
#self.plugin_manager.collectPlugins()
self.plugin_manager.loadPlugins()
self.activated_plugins = Config.GetOption("ActivePlugins")
#populating the general plugin index
for f in self.plugin_manager.getPluginsOfCategory("General"):
if f.plugin_object.name in self.activated_plugins:
self.general_plugins[f.plugin_object.name] = f.plugin_object
#the passive plugins now
for p in self.plugin_manager.getPluginsOfCategory("Passives"):
if p.plugin_object.name in self.activated_plugins:
self.passive_plugins[p.plugin_object.name] = p.plugin_object
self.id_range = []
#getting the command line file argument
if "gEcrit.py" not in (sys.argv)[-1]:
target_file = os.path.normpath(os.path.realpath(sys.argv[-1]))
#no file was provided
else:
target_file = None
wx.Frame.__init__(self, parent, 1000, 'gEcrit', size=(700, 600))
self.Bind(wx.EVT_CLOSE, self.OnQuit)
#this object will handle layout and docking/undocking of widgets
self.aui_manager = wx.aui.AuiManager(self)
#creating the status bar
self.status_bar = self.CreateStatusBar()
self.status_bar.SetStatusText("Done")
self.status_bar.SetFieldsCount(3)
self.status_bar.SetId(999)
if not Config.GetOption("StatusBar"):
self.status_bar.Hide()
self.menubar = MainMenu(self)
self.SetMenuBar(self.menubar)
#setting the application icon
self.SetIcon(wx.Icon('icons/gEcrit.png', wx.BITMAP_TYPE_PNG))
#this variable is incremented each time we create a StcControl
self.text_id = 0
#finding the user home folder
self.HOMEDIR = os.path.expanduser('~')
os.chdir(os.path.abspath(self.HOMEDIR))
#creating a plugin manager instance
self.plugin_conf_manager = gEcritPluginManager(self)
#creating the left side notebook
self.side_notebook = wx.aui.AuiNotebook(self, id=SIDEPANEL_ID, size=(-1,-1),
style=wx.BORDER_SUNKEN|wx.aui.AUI_NB_TAB_SPLIT|wx.aui.AUI_NB_TAB_MOVE|wx.aui.AUI_NB_SCROLL_BUTTONS )
#creating the bottom side notebook
self.bottom_notebook = wx.aui.AuiNotebook(self, id=BOTTOMPANEL_ID, size=(-1,
-1), style=wx.BORDER_SUNKEN|wx.aui.AUI_NB_TAB_SPLIT|wx.aui.AUI_NB_TAB_MOVE|wx.aui.AUI_NB_SCROLL_BUTTONS )
#the aui notebook that will manage editor tabs
self.nb = AuiNoteBook(parent = self)
#going back to application running point
os.chdir(pathname)
#binding the menubar events
f = wx.FindWindowById
self.Bind(wx.EVT_MENU, lambda event: self.NewTab(event,
"New Document", "New Document"), id=500)
self.Bind(wx.EVT_MENU, lambda event: self.OnOpenFile(event), id=
501)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).Save(event),
id=502)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).SaveAs(event),
id=503)
self.Bind(wx.EVT_MENU, self.OnPrint,id=504)
self.Bind(wx.EVT_MENU, lambda event: self.OnMenuCloseTab(event,
(self.id_range)[self.nb.GetSelection()]), id=505)
self.Bind(wx.EVT_MENU, lambda event: self.OnQuit(event), id=506)
self.Bind(wx.EVT_MENU, self.SaveAll, id=563)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnReload(event),id = 507)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnUndo(event),
id=520)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnRedo(event),
id=521)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnCut(event),
id=522)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnCopy(event),
id=523)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnPaste(event),
id=524)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnSelectAll(event),
id=525)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnSelectCodeBlock(event),
id=562)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnInsertDate(event),
id=526)
self.Bind(wx.EVT_MENU, lambda event: self.OnPrefs(event), id=527)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnDedent(event),
id=528)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnIndent(event),
id=529)
self.Bind(wx.EVT_MENU, lambda event:f((self.id_range)[self.nb.GetSelection()]).OnComment(event),
id=559)
self.Bind(wx.EVT_MENU, lambda event:f((self.id_range)[self.nb.GetSelection()]).OnUnComment(event),
id=560)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.FindDocText(event, (self.id_range)[self.nb.GetSelection()]),
id=530)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.ReplaceDocText(event, (self.id_range)[self.nb.GetSelection()]),
id=531)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.FindDocText(event, (self.id_range)[self.nb.GetSelection()],wx.stc.STC_FIND_REGEXP),
id=532)
self.Bind(wx.EVT_MENU, lambda event: FindRepl.ReplaceDocText(event ,(self.id_range)[self.nb.GetSelection()], wx.stc.STC_FIND_REGEXP),
id=533)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnStartRecordMacro(event), id=534)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnStopRecordMacro(event), id=542)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnMacroPlayback(event), id=543)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnZoomIn(event),
id=535)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnZoomOut(event),
id=536)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnResetZoom(event),
id=537)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("LineNumbers",
self.menubar.IsChecked(538), self.id_range), id=538)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("FoldMarks",
self.menubar.IsChecked(539), self.id_range), id=539)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("Whitespace",
self.menubar.IsChecked(540), self.id_range), id=540)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("IndetationGuides",
self.menubar.IsChecked(541), self.id_range), id=541)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("EdgeLine",
self.menubar.IsChecked(546), self.id_range), id=546)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("SyntaxHighlight",
self.menubar.IsChecked(547), self.id_range), id=547)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("StatusBar",
self.menubar.IsChecked(545), self.id_range), id=545)
self.Bind(wx.EVT_MENU, self.OnFullScreen, id=557)
self.Bind(wx.EVT_MENU, self.ToggleSidePanel, id = 548)
self.Bind(wx.EVT_MENU, self.ToggleBottomPanel, id = 549)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).OnRemoveTrails(event),id=551)
self.Bind(wx.EVT_MENU, lambda event: self.OnRun(event,self.id_range[self.nb.GetSelection()]), id = 558)
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).Tabify(event), id = 552 )
self.Bind(wx.EVT_MENU, lambda event: f((self.id_range)[self.nb.GetSelection()]).UnTabify(event), id = 553 )
self.Bind(wx.EVT_MENU, self.SaveSessionFile , id = 554)
self.Bind(wx.EVT_MENU, gEcritSession.DeleteSessionFile , id = 555)
self.Bind(wx.EVT_MENU, lambda event: Config.ChangeOption("Session",self.menubar.IsChecked(556)) , id = 556)
self.Bind(wx.EVT_MENU, self.plugin_conf_manager.ShowMe, id = 564 )
self.Bind(wx.EVT_MENU, lambda event: self.OnAbout(event), id=550)
#setting up the toolbar
self.toolbar = MainToolbar(self, -1)
self.FontCtrl = wx.FontPickerCtrl(self.toolbar, 607, size=(100,
30))
self.Bind(wx.EVT_FONTPICKER_CHANGED, lambda event: ChangeFont(event,
self.FontCtrl.GetSelectedFont(), self.id_range))
#the goto line text box
self.toolbar.AddControl(self.FontCtrl)
self.toolbar.AddControl(wx.TextCtrl(self.toolbar, 608, size=(-1,
-1), style=wx.TE_PROCESS_ENTER))
#Binding toolbar events
self.Bind(wx.EVT_TOOL, lambda event: self.NewTab(event,
"New Document", "New Document"), id=600)
self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=601)
self.Bind(wx.EVT_TOOL, lambda event: f((self.id_range)[self.nb.GetSelection()]).Save(event),
id=602)
self.Bind(wx.EVT_TOOL, lambda event: f((self.id_range)[self.nb.GetSelection()]).SaveAs(event),
id=603)
self.Bind(wx.EVT_TOOL, self.OnPrefs, id=604)
self.Bind(wx.EVT_TOOL, self.OnQuit, id=605)
self.Bind(wx.EVT_TEXT_ENTER, lambda event: self.OnGotoBox(event,
(self.id_range)[self.nb.GetSelection()]), id=608)
self.Bind(wx.EVT_TOOL, self.OnPrint, id=609)
self.Bind(wx.EVT_TOOL, lambda event: self.OnRun(event, (self.id_range)[self.nb.GetSelection()]),
id=610)
#Give the plugins a chance to set themselves in the system
#generals first
for g in self.general_plugins:
self.general_plugins[g].Init(self)
#passives now
for p in self.passive_plugins:
self.passive_plugins[p].Init(self)
#put it in the middle of the sceen
self.Centre()
#the preferences window
self.conf_win = ConfFrame = CfgFrame(self)
#addung the pane to the aui manager.
self.aui_manager.AddPane(self.toolbar, wx.aui.AuiPaneInfo().Name("toolbar").Caption(self._("Toolbar")).ToolbarPane().Top().CloseButton(False))
self.aui_manager.AddPane(self.nb, wx.aui.AuiPaneInfo().Name("editor tabs").Caption(self._("Tabs")).CenterPane())
self.aui_manager.AddPane(self.bottom_notebook, wx.aui.AuiPaneInfo().Name("bottom panel").Caption(self._("Assistants and others")).Bottom().BestSize((700,150)).PinButton(True).MaximizeButton(True))
self.aui_manager.AddPane(self.side_notebook, wx.aui.AuiPaneInfo().Name("left_side panel").Caption(self._("Toolbox")).Left().BestSize((150,400)).PinButton(True).MaximizeButton(True))
#loading saved session if any exists and if enabled
if Config.GetOption("Session"):
self.LoadSessionFile()
#make changes visible
self.aui_manager.Update()
if target_file: #load command line file path argument
self.NewTab(0, os.path.split(target_file)[-1], target_file)
    def LoadSessionFile(self):
        """
        LoadSessionFile

        Loads the session file if it exists and restores the saved
        application state; otherwise creates a fresh gEcritSession.
        """
        try:
            self.session = gEcritSession.LoadFromFile()
            self.session.RestoreAppState(self)
            self.SetStatus(0,self._ ( "Session file loaded."))
        except Exceptions.NoSessionFile:
            self.session = gEcritSession()
def SaveSessionFile(self, event):
"""
SaveSessionFile
Reccords the application state and saves it to disk via the
session instance.
"""
try: #testing if a session object exists
self.session
except AttributeError:
self.session = gEcritSession()
self.session.RecordAppState(self)
self.session.SaveToFile()
self.SetStatus(event, self._ ("Session saved."))
    def OnFullScreen(self,event):
        """
        OnFullScreen

        Toggles fullscreen mode for the main window (the window
        caption is hidden while fullscreen).
        """
        self.ShowFullScreen(not self.IsFullScreen(),wx.FULLSCREEN_NOCAPTION)
    def OnPrefs(self, event):
        """
        OnPrefs

        Shows the (already constructed) preferences window.
        """
        self.conf_win.ShowMe(0)
    def NewTab(self, event, nb, file_path):
        """
        NewTab

        Creates a new AUI notebook tab for *file_path* (or an empty
        document), instantiates an StcTextCtrl for it, applies the
        current font, registers its id in self.id_range and notifies
        the general plugins for new documents. *nb* is the tab label.
        Returns the new StcTextCtrl, or None when *file_path* is falsy
        or does not exist.
        """
        if not file_path:
            return
        #update recent file list
        if file_path != "New Document" and file_path != "":
            if not os.path.exists(file_path):
                wx.MessageDialog(None, self._ ("Could not load file.\nThe file ")+file_path+self._ (" does not exists."),self._ ("Input Error") ,wx.OK).ShowModal()
                return
            lst = Config.GetOption("RecentFiles")
            lst.append(file_path)
            Config.ChangeOption("RecentFiles",lst)
            self.menubar.UpdateRecentFiles()
        #the parent of the StcControl
        panel = wx.Panel(self)
        panel.identifierTag = nb
        #hiding self.text_id
        text_id = self.text_id
        #set up the editor
        text_ctrl = StcTextCtrl(panel, self.text_id, file_path)
        #the StcControl sizer
        text_ctrl_sizer = wx.BoxSizer(wx.HORIZONTAL)
        text_ctrl_sizer.Add(text_ctrl, 1, wx.EXPAND)
        panel.SetSizer(text_ctrl_sizer)
        panel.Fit()
        #append the id of this StcControl to the id_range
        self.id_range.append(text_id)
        text_ctrl.SetBufferedDraw(True)
        #apply the font
        text_ctrl.StyleSetFont(0, self.FontCtrl.GetSelectedFont())
        #add the panel as a new tab
        self.nb.AddPage(panel, str(nb), select=True)
        if file_path == "New Document" or file_path == "":
            #notify plugins
            for g in self.general_plugins:
                self.general_plugins[g].NotifyNewTabOpened()
        self.text_id += 1
        return text_ctrl
    def OnRun(self, event, text_id):
        """
        Saves document *text_id* and runs it in an xterm window, for
        testing.

        NOTE(review): the file path is interpolated unquoted into a
        shell command; paths containing spaces or shell metacharacters
        will break (consider subprocess with an argument list).
        """
        cur_doc = wx.FindWindowById(text_id)
        cur_doc.Save(0)
        os.system("xterm -e sh runner.sh "+cur_doc.GetFilePath())
def OnGotoBox(self, event, text_id):
"""
OnGotoBox
Finds the current document, and scrolls to the line indicated
by its input upon the Return key.
"""
cur_doc = wx.FindWindowById(text_id)
goto = wx.FindWindowById(608)
scroll_pos = int(goto.GetLineText(0))
cur_doc.ScrollToLine(scroll_pos - 1)
    def OnPrint(self, event):
        """
        OnPrint

        Prints the current document through the PrettyPrinter dialog.
        """
        print_dlg = PrettyPrinter(self)
        del print_dlg
    def OnAbout(self, event):
        """
        OnAbout

        Shows the about window and disposes of it afterwards.
        """
        #ShowAbout = AboutWindow
        about_win = AboutWindow()
        del about_win
    def OnQuit(self, event):
        """
        OnQuit

        Prompts the user for confirmation and, unless the answer is
        "No", shuts the application down via Quit().
        """
        #warn the user
        warn_dlg = wx.MessageDialog(None,
                self._ ("Please make sure that your data is\
 saved.\nAre you sure you want to quit?"),
                self._ ("Are you sure?"), style=wx.YES_NO)
        warn_dlg_val = warn_dlg.ShowModal()
        if warn_dlg_val != 5104: # 5104 is wx.ID_NO: quit on any other answer
            #call the quit method to stop the terminals and the plugins
            self.Quit()
    def Quit(self):
        """Stop all plugins, optionally save the session, and exit(0)."""
        #stop and notify all plugins of application shutdown.
        #generals now
        for g in self.general_plugins:
            self.general_plugins[g].Stop()
        for p in self.passive_plugins:
            self.passive_plugins[p].Stop()
        #save the session if the feature is activated
        if Config.GetOption("Session"):
            self.SaveSessionFile(0)
        #exit status 0, all ok
        sys.exit(0)
    def OnMenuCloseTab(self, event, text_id):
        """Menu handler: close the tab holding document *text_id*."""
        # False signals ManageCloseTab that no notebook event is involved.
        self.ManageCloseTab(False, text_id)
    def ManageCloseTab(self, event, text_id):
        """
        ManageCloseTab

        Manages closing a tab: if the document has unsaved changes the
        user may save, discard or cancel. *event* is the AuiNotebook
        close event, or False when invoked from the menu (then the
        page is deleted explicitly instead of via event.Skip()).
        """
        cur_doc = wx.FindWindowById(text_id)
        current_text = cur_doc.GetText()
        #check if the user saved the changes
        if cur_doc.save_record != current_text:
            #if not, notify him
            save_prompt = wx.MessageDialog(None, self._ ("The file ") + os.path.split(cur_doc.GetFilePath())[-1] +
                     self._ (" is not saved.\n\
Do you wish to save it?"), "",
                    style=wx.CANCEL | wx.YES | wx.NO)
            prompt_val_ = save_prompt.ShowModal()
            if prompt_val_ == 5103: #YES
                if not cur_doc.Save(0):
                    event.Veto()
                    return
                else:
                    self.id_range.remove(text_id)
            elif prompt_val_ == 5101: #CANCEL
                event.Veto()
                return
            elif prompt_val_ == 5104: #NO
                self.id_range.remove(text_id)
            save_prompt.Destroy()
        else:
            self.id_range.remove(text_id)
        # skip the event and let the AuiNotebook handle the deletion
        cur_doc.Deactivate() # tell the StcTextCtrl to prepare for deletion
        if not event: # check if it was fired from menu
            self.nb.DeletePage(self.nb.GetSelection())
        else:
            event.Skip()
    def OnOpenFile(self, event):
        """
        OnOpenFile

        Collects one or more paths via a file dialog (starting in the
        most recently used folder) and opens them.
        """
        open_file_dlg = wx.FileDialog(None, style=wx.OPEN | wx.FD_MULTIPLE)
        if self.menubar.last_recent != "":
            #go to the last accessed folder
            open_file_dlg.SetDirectory(os.path.split(self.menubar.last_recent)[0])
        else:
            open_file_dlg.SetDirectory(self.HOMEDIR)
        if open_file_dlg.ShowModal() == wx.ID_OK:
            paths = open_file_dlg.GetPaths()
            self.OpenFile(paths)
        del open_file_dlg
def OpenFile(self, paths):
"""
OpenFile
Calls NewTab with the collected path.
Supports multiple path selection.
"""
# if paths is a list, open an StcContrel for each of them
if isinstance(paths, types.ListType):
for f in paths:
self.NewTab(0, os.path.split(f)[-1], f)
Log.AddLogEntry(self._ ("Opened file ") + f)
#if a string, open an StcControl for it
else:
self.NewTab(0, os.path.split(paths)[-1], paths)
Log.AddLogEntry(self._ ("Opened file ") + paths)
#notify general plugins
for t in self.general_plugins:
try: #insulate from possible plugin errors
self.general_plugins[t].NotifyDocumentOpened()
except: pass
AutoComp.UpdateCTagsFiles(self.id_range)
    def SetStatus(self, event, text):
        """
        SetStatus

        Sets the status bar text. *event* is accepted for handler
        compatibility but unused.
        """
        self.status_bar.SetStatusText(text)
        # event.Skip()
    def ResetStatus(self, event):
        """
        ResetStatus

        Clears the status bar text and lets the event propagate.
        """
        self.status_bar.SetStatusText("")
        event.Skip()
def SaveAll(self, event):
"""
SaveAll
Saves all the current documents using the
objects Save function.
"""
for id in self.id_range:
cur_doc = wx.FindWindowById(id)
if cur_doc.GetFilePath() != "" and cur_doc.GetFilePath() != \
"New Document":
cur_doc.Save(0)
####################################################################
# PLUGIN INTERFACE #
####################################################################
def ToggleSidePanel(self, event):
pane = self.aui_manager.GetPane(self.side_notebook)
if pane.IsShown(): pane.Hide()
else: pane.Show()
self.aui_manager.Update()
def ToggleBottomPanel(self, event):
pane = self.aui_manager.GetPane(self.bottom_notebook)
if pane.IsShown(): pane.Hide()
else: pane.Show()
self.aui_manager.Update()
    def GetCurrentDocument(self):
        """
        GetCurrentDocument

        Returns the StcTextCtrl of the selected tab, or None when no
        tab is open.
        """
        try:
            return wx.FindWindowById(self.id_range[self.nb.GetSelection()])
        except IndexError:
            return None
def GetAllDocuments(self):
"""
GetALlDocuments
Returns all existing buffers.
"""
docs = []
for d in self.id_range:
docs.append(wx.FindWindowById((d)))
return docs
    def AddToMenuBar(self,label,menu):
        """
        AddToMenuBar

        @label The label of the new menu entry.
        @menu A wx.Menu object which will be added in the Plugins menu.

        Appends *menu* to the Plugins menu and returns the new item.
        """
        return self.menubar.plugins.AppendMenu(-1,label,menu)
    def RemoveFromMenubar(self, menu):
        """
        RemoveFromMenubar

        Removes the supplied *menu* item from the Plugins submenu.
        """
        self.menubar.plugins.RemoveItem(menu)
    def BindMenubarEvent(self, item, function):
        """
        BindMenubarEvent

        @item The menu entry object to bind.
        @function The handler to bind it to.

        Binds a wx.EVT_MENU event for *item* to *function*.
        """
        self.Bind(wx.EVT_MENU, function, id = item.GetId())
    def GetBottomPanel(self):
        """
        GetBottomPanel

        Returns the lower (assistants) notebook.
        """
        return self.bottom_notebook
    def AddToBottomPanel(self, panel, name):
        """
        AddToBottomPanel

        Adds the supplied panel to the lower notebook under the
        supplied *name* label.
        """
        self.bottom_notebook.AddPage(panel, name)
    def GetSidePanel(self):
        """
        GetSidePanel

        Returns the side (toolbox) notebook.
        """
        return self.side_notebook
    def AddToSidePanel(self, panel, name):
        """
        AddToSidePanel

        Adds the supplied panel to the side notebook under the
        supplied *name* label.
        """
        self.side_notebook.AddPage(panel, name)
    def DeleteBottomPage(self, name):
        """
        DeleteBottomPage

        Deletes the tab named *name* from the lower notebook.
        """
        self.bottom_notebook.DeletePage(Config.GetTab(name,
                self.bottom_notebook))
    def DeleteSidePage(self, name):
        """
        DeleteSidePage

        Deletes the tab named *name* from the side notebook.
        """
        self.side_notebook.DeletePage(Config.GetTab(name,
                self.side_notebook))
    def AddPaneToAui(self, widget ,pane_info):
        """
        AddPaneToAui

        @widget the widget to be added
        @pane_info needs to be an AuiPaneInfo object.

        Adds the pane to the aui manager.
        """
        self.aui_manager.AddPane(widget, pane_info)
    def AddToolbarToAui(self, toolbar, pane_info):
        """
        AddToolbarToAui

        @toolbar the wx.Toolbar object
        @pane_info needs to be a wx.AuiPaneInfo object with its name and caption
        defined.

        Docks *toolbar* at the top, without a close button.
        """
        self.aui_manager.AddPane(toolbar, pane_info.ToolbarPane().Top().CloseButton(False))
    def GetAuiManager(self):
        """
        GetAuiManager

        Returns the AuiManager that is responsible for window layout.
        """
        return self.aui_manager
    def GetTabManager(self):
        """
        GetTabManager

        Returns the AuiNoteBook that is responsible for tab management.
        """
        return self.nb
    def CreateNewDocument(self, name):
        """
        CreateNewDocument

        @name a string to be given to the new document as a name.

        Creates a new empty document and returns its StcTextCtrl.
        """
        return self.NewTab(0, name, "")
def main():
    """Create the wx application and the main gEcrit frame, then loop."""
    app = wx.PySimpleApp()
    frame = gEcrit(parent=None, id=-1)
    frame.Show()
    app.MainLoop()
if __name__ == '__main__':
main()
``` |
[
{
"content": "Here is the script:\n```python\nimport neurolab as nl\nimport game2\nimport othello\nimport ntuplesystematic as nts\nimport time\nimport random\n\nclass nn:\n\tdef __init__(self):\n\t\tself.x = [[-1,1] for x in range(64)]\n\t\tself.net = nl.net.newff(self.x,[1])\n\t\t#self.net.trainf = nl.train.tr... | [
{
"content": "Here is the script:\n<|memory_start|>```python\nimport neurolab as nl\nimport game2\nimport othello\nimport ntuplesystematic as nts\nimport time\nimport random\n\nclass nn:\n\tdef __init__(self):\n\t\tself.x = [[-1,1] for x in range(64)]\n\t\tself.net = nl.net.newff(self.x,[1])\n\t\t#self.net.trai... | ```python
import neurolab as nl
import game2
import othello
import ntuplesystematic as nts
import time
import random
class nn:
	def __init__(self):
		"""Build a single-output feed-forward net over a 64-cell board.

		Inputs are the 64 board cells, each in [-1, 1]. The remaining
		fields track per-colour move counts, the last seen board
		vectors and the collected training pairs.
		"""
		self.x = [[-1,1] for x in range(64)]
		self.net = nl.net.newff(self.x,[1])
		#self.net.trainf = nl.train.train_gd
		self.moveb = 0
		self.movew = 0
		self.last_vb = 0
		self.last_vw = 0
		self.fin_v = []
		self.fin_val = []
def play_move(self,game,epsilon = 0):
moves = game.generate_moves()
num = random.uniform(0,1)
if(num <= epsilon):
temp = game.copy()
if(game.player==-1):
if(self.moveb == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.moveb = self.moveb+1
self.last_vb = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 0
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 1
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (v1[0],move)
else:
if(self.movew == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.movew = self.movew+1
self.last_vw = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 2
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 3
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (v1[0],move)
else:
if(game.player == -1):
if(self.moveb==0):
j=0
max1 = 0
best_v = 0
best_move = None
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.moveb = self.moveb+1
self.last_vb = best_v
return (max1, best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 4
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 5
#print self.last_vw
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = best_v
return (max1,best_move)
else:
if(self.movew==0):
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.movew = self.movew+1
self.last_vw = best_v
return (max1,best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 6
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 7
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = best_v
return (max1,best_move)
def reset(self):
#print self.fin_v
#print self.fin_val
error = self.net.train(self.fin_v,self.fin_val,epochs=5,show=1)
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
def reset_without_train(self):
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
if __name__ == "__main__":
"""
Creates a main player
"""
playernew = nn()
nTuplesSystematicObject = nts.nTuplesSystematic()
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x)),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), True)
playernew.reset_without_train()
time.sleep(5)
k = 100
for i in range(k):
print(i)
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x,0.3)),game2.player(lambda x: playernew.play_move(x,0.3)), False)
playernew.reset()
wins = [0, 0]
for i in range(100):
winner = game2.play(othello.game(), game2.player_epsilon(lambda x: playernew.play_move(x)),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), False)
if winner == 1:
wins[0] += 1
elif winner == 2:
wins[1] += 1
winner = game2.play(othello.game(),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player_epsilon(lambda x: playernew.play_move(x)), False)
if winner == 2:
wins[0] += 1
elif winner == 1:
wins[1] += 1
print wins
f = open('results','a')
val = (k,0.001,'epsilon',wins)
val = str(val)
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of HEPData.\n# Copyright (C) 2021 CERN.\n#\n# HEPData is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public License as\n# published by the Free Software Found... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# This file is part of HEPData.\n# Copyright (C) 2021 CERN.\n#\n# HEPData is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public License as\n# published by the Fre... | ```python
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2021 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
import pytest
import yaml
def test_parse_trailing_tab_libyaml():
"""
Check that PyYAML (with LibYAML) can parse a trailing tab character.
Currently this is only possible with LibYAML, not with pure-Python PyYAML.
:return:
"""
data = yaml.load('key: value\t', Loader=yaml.CSafeLoader)
assert data['key'] == 'value'
def test_parse_trailing_tab_pyyaml():
"""
Latest PyYAML v5.4.1 (pure Python) currently has a bug parsing a trailing tab character.
https://github.com/yaml/pyyaml/issues/306 and https://github.com/yaml/pyyaml/issues/450
:return:
"""
with pytest.raises(yaml.scanner.ScannerError):
yaml.load('key: value\t', Loader=yaml.SafeLoader)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nfrom flask import render_template, request\nimport numbers\nfrom pprint import pprint\nfrom arps.restserver import app, db, ApiException, apiview\nfrom arps.globals import *\nfrom arps.validation import get_schemas_for_endpoint\nfrom a... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom flask import render_template, request\nimport numbers\nfrom pprint import pprint\nfrom arps.restserver import app, db, ApiException, apiview\nfrom arps.globals import *\nfrom arps.validation import get_schemas_for_... | ```python
from flask import render_template, request
import numbers
from pprint import pprint
from arps.restserver import app, db, ApiException, apiview
from arps.globals import *
from arps.validation import get_schemas_for_endpoint
from arps.models import *
METHODS = ['GET', 'POST', 'PUT', 'DELETE']
@app.route('/')
def main():
l = []
rules = [rule for rule in sorted(list(app.url_map.iter_rules()), key=lambda rule: rule.rule) if rule.rule.startswith('/api/')]
for rule in rules:
schema_request, schema_response = get_schemas_for_endpoint(rule.endpoint)
l.append({
'path': rule.rule,
'methods': sorted([method for method in rule.methods if method in METHODS]),
'endpoint': rule.endpoint,
'schema_request': schema_request,
'schema_response': schema_response,
'doc': str(app.view_functions[rule.endpoint].__doc__).strip()
})
return render_template('apidoc.html', rules=l)
def populate_object(o :object, d: dict):
changed = set()
unchanged = set()
unkown = set()
for key, value in d.items():
if not isinstance(value, (str, numbers.Number, bool)):
unkown.add(key)
continue
if hasattr(o, key):
if getattr(o, key) == value:
unchanged.add(key)
else:
setattr(o, key, value)
changed.add(key)
else:
unkown.add(key)
return changed, unchanged, unkown
def get_object_or_404(model, *criterion, message=""):
r = db.session.query(model).get(criterion)
if r is None:
raise ApiException(message, code=404)
else:
return r
@app.route('/api/v1.0/users/', methods=['GET'], endpoint=endpoint_user_list)
@apiview()
def user_list():
"""
Return a list of all users
"""
users = db.session.query(User).all()
message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
return message
@app.route('/api/v1.0/users/<int:user_id>', methods=['GET'], endpoint=endpoint_user_get)
@apiview()
def user_get(user_id):
"""
Return the user with an specific id.
"""
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/users/<int:user_id>', methods=['PUT', 'OPTIONS'], endpoint=endpoint_user_update)
@apiview(needs_json_in_request=True)
def user_update(user_id):
"""
Update the user with the given id with the dictionary provided. All fields are optional.
If the id field is given, it must be the same value as the url leaf.
When updating the user, no fields are required.
"""
data = request.json['content']
if data.get('id', user_id) != user_id:
raise ApiException("User ID in json body and in url must be the same.")
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
populate_object(user, data)
if 'roles' in data:
user.set_roles(data['roles'])
db.session.commit()
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/roles/', methods=['GET'], endpoint=endpoint_role_list)
@apiview()
def role_list():
"""
Return a list of all roles
"""
roles = Role.query.all()
message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
return message
``` |
[
{
"content": "Here is the script:\n```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Calculate the pitch needed to maintain contact between the front wheel and\nground.\n\"\"\"\nfrom sympy import simplify, symbols\nfrom sympy.physics.mechanics import ReferenceFrame, Point\nfrom sympy.physics.mec... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Calculate the pitch needed to maintain contact between the front wheel and\nground.\n\"\"\"\nfrom sympy import simplify, symbols\nfrom sympy.physics.mechanics import ReferenceFrame, Point\nfrom s... | ```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Calculate the pitch needed to maintain contact between the front wheel and
ground.
"""
from sympy import simplify, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import msprint
from mpmath import findroot
from sympy.utilities import lambdify
from sympy.printing import ccode
import textwrap
## define coordinates
# phi: roll
# theta: pitch
# delta: steer
phi, theta, delta = symbols('φ θ δ')
# rR: rear radius
# rF: front radius
rR, rF = symbols('rR rF')
# cR: distance from rear wheel center to steer axis
# cF: distance from front wheel center to steer axis
# ls: steer axis separation
cR, cF, ls = symbols('cR cF ls')
benchmark_parameters = {
rR: 0.3,
rF: 0.35,
cR: 0.9534570696121847,
ls: 0.2676445084476887,
cF: 0.0320714267276193,
}
## define reference frames
# N: inertial frame
# B: rear aseembly frame
# H: front assembly frame
N = ReferenceFrame('N')
B = N.orientnew('B', 'body', [0, phi, theta], 'zxy') # yaw is ignored
H = B.orientnew('H', 'axis', [delta, B.z])
## define points
# rear wheel/ground contact point
pP = Point('P')
# define unit vectors from rear/front wheel centers to ground
# along the wheel plane
R_z = ((B.y ^ N.z) ^ B.y).normalize()
F_z = ((H.y ^ N.z) ^ H.y).normalize()
# define rear wheel center point
pRs = pP.locatenew('R*', -rR*R_z)
# "top" of steer axis, point of SA closest to R*
# orthogonal projection of rear wheel center on steer axis
pRh = pRs.locatenew('R^', cR*B.x)
# orthogonal projection of front wheel center on steer axis
pFh = pRh.locatenew('S^', ls*B.z)
# front wheel center point
pFs = pFh.locatenew('S*', cF*H.x)
# front wheel/ground contact point
pQ = pFs.locatenew('Q', rF*F_z)
# N.z component of vector to pQ from pP
# this is our configuration constraint
f = simplify(pQ.pos_from(pP) & N.z)
print("f = {}\n".format(msprint(f)))
# calculate the derivative of f for use with Newton-Raphson
df = f.diff(theta)
print("df/dθ = {}\n".format(msprint(df)))
# constraint function for zero steer/lean configuration and
# using the benchmark parameters
f0 = lambdify(theta, f.subs({phi: 0, delta: 0}).subs(benchmark_parameters))
df0 = lambdify(theta, df.subs({phi: 0, delta: 0}).subs(benchmark_parameters))
print("verifying constraint equations are correct")
print("for zero steer/lean, pitch should be pi/10")
findroot(f0, 0.3, solver="newton", tol=1e-8, verbose=True, df=df0)
# convert to moore parameters
c_sym = symbols('x[1] pitch x[2] m_rr m_rf m_d1 m_d3 m_d2')
c_sym_dict = dict(zip([phi, theta, delta, rR, rF, cR, cF, ls], c_sym))
fc = ccode(f.subs(c_sym_dict))
dfc = ccode(df.subs(c_sym_dict))
cpp_math = {
'cos': 'std::cos',
'sin': 'std::sin',
'pow': 'std::pow',
'sqrt': 'std::sqrt',
}
fcs = fc
dfcs = dfc
for k, v in cpp_math.items():
fcs = fcs.replace(k, v)
dfcs = dfcs.replace(k, v)
print('\nf:')
print(textwrap.fill(fcs, 110, break_long_words=False))
print('\ndf:')
print(textwrap.fill(dfcs, 110, break_long_words=False))
``` |
[
{
"content": "Return the code unaltered:\n```python\n# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-04-21 23:16\nfrom __future__ import unicode_literals\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timez... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2017-04-21 23:16\nfrom __future__ import unicode_literals\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport dj... | ```python
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-21 23:16
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('password',
models.CharField(
max_length=128,
verbose_name='password')),
('last_login',
models.DateTimeField(
blank=True,
null=True,
verbose_name='last login')),
('is_superuser',
models.BooleanField(
default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status')),
('username',
models.CharField(
error_messages={
'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name='username')),
('first_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='first name')),
('last_name',
models.CharField(
blank=True,
max_length=30,
verbose_name='last name')),
('email',
models.EmailField(
blank=True,
max_length=254,
verbose_name='email address')),
('is_staff',
models.BooleanField(
default=False,
help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status')),
('is_active',
models.BooleanField(
default=True,
help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.',
verbose_name='active')),
('date_joined',
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name='date joined')),
('user_type',
models.CharField(
choices=[
('patient',
'User Type Pacient'),
('monitor',
'User Type Monitor')],
default='monitor',
max_length=2)),
('groups',
models.ManyToManyField(
blank=True,
help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.',
related_name='user_set',
related_query_name='user',
to='auth.Group',
verbose_name='groups')),
('user_permissions',
models.ManyToManyField(
blank=True,
help_text='Specific permissions for this user.',
related_name='user_set',
related_query_name='user',
to='auth.Permission',
verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'users',
'abstract': False,
'verbose_name': 'user',
},
managers=[
('objects',
django.contrib.auth.models.UserManager()),
],
),
]
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\n flask_security.forms\n ~~~~~~~~~~~~~~~~~~~~\n\n Flask-Security forms module\n\n :copyright: (c) 2012 by Matt Wright.\n :copyright: (c) 2017 by CERN.\n :license: MIT, see LICENSE for more details.\n\"\"\"\... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\n flask_security.forms\n ~~~~~~~~~~~~~~~~~~~~\n\n Flask-Security forms module\n\n :copyright: (c) 2012 by Matt Wright.\n :copyright: (c) 2017 by CERN.\n :license: MIT, see LICENSE for more d... | ```python
# -*- coding: utf-8 -*-
"""
flask_security.forms
~~~~~~~~~~~~~~~~~~~~
Flask-Security forms module
:copyright: (c) 2012 by Matt Wright.
:copyright: (c) 2017 by CERN.
:license: MIT, see LICENSE for more details.
"""
import inspect
from flask import Markup, current_app, flash, request
from flask_login import current_user
from flask_wtf import FlaskForm as BaseForm
from speaklater import make_lazy_gettext
from wtforms import BooleanField, Field, HiddenField, PasswordField, \
StringField, SubmitField, ValidationError, validators
from .confirmable import requires_confirmation
from .utils import _, _datastore, config_value, get_message, \
localize_callback, url_for_security, validate_redirect_url, \
verify_and_update_password
lazy_gettext = make_lazy_gettext(lambda: localize_callback)
_default_field_labels = {
'email': _('Email Address'),
'password': _('Password'),
'remember_me': _('Remember Me'),
'login': _('Login'),
'register': _('Register'),
'send_confirmation': _('Resend Confirmation Instructions'),
'recover_password': _('Recover Password'),
'reset_password': _('Reset Password'),
'retype_password': _('Retype Password'),
'new_password': _('New Password'),
'change_password': _('Change Password'),
'send_login_link': _('Send Login Link')
}
class ValidatorMixin(object):
def __call__(self, form, field):
if self.message and self.message.isupper():
self.message = get_message(self.message)[0]
return super(ValidatorMixin, self).__call__(form, field)
class EqualTo(ValidatorMixin, validators.EqualTo):
pass
class Required(ValidatorMixin, validators.DataRequired):
pass
class Email(ValidatorMixin, validators.Email):
pass
class Length(ValidatorMixin, validators.Length):
pass
email_required = Required(message='EMAIL_NOT_PROVIDED')
email_validator = Email(message='INVALID_EMAIL_ADDRESS')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=6, max=128, message='PASSWORD_INVALID_LENGTH')
def get_form_field_label(key):
return lazy_gettext(_default_field_labels.get(key, ''))
def unique_user_email(form, field):
if _datastore.get_user(field.data) is not None:
msg = get_message('EMAIL_ALREADY_ASSOCIATED', email=field.data)[0]
raise ValidationError(msg)
def valid_user_email(form, field):
form.user = _datastore.get_user(field.data)
if form.user is None:
raise ValidationError(get_message('USER_DOES_NOT_EXIST')[0])
class Form(BaseForm):
def __init__(self, *args, **kwargs):
if current_app.testing:
self.TIME_LIMIT = None
super(Form, self).__init__(*args, **kwargs)
class EmailFormMixin():
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator])
class UserEmailFormMixin():
user = None
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, valid_user_email])
class UniqueEmailFormMixin():
email = StringField(
get_form_field_label('email'),
validators=[email_required, email_validator, unique_user_email])
class PasswordFormMixin():
password = PasswordField(
get_form_field_label('password'), validators=[password_required])
class NewPasswordFormMixin():
password = PasswordField(
get_form_field_label('password'),
validators=[password_required, password_length])
class PasswordConfirmFormMixin():
password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('password', message='RETYPE_PASSWORD_MISMATCH'),
password_required])
class NextFormMixin():
next = HiddenField()
def validate_next(self, field):
if field.data and not validate_redirect_url(field.data):
field.data = ''
flash(*get_message('INVALID_REDIRECT'))
raise ValidationError(get_message('INVALID_REDIRECT')[0])
class RegisterFormMixin():
submit = SubmitField(get_form_field_label('register'))
def to_dict(form):
def is_field_and_user_attr(member):
return isinstance(member, Field) and \
hasattr(_datastore.user_model, member.name)
fields = inspect.getmembers(form, is_field_and_user_attr)
return dict((key, value.data) for key, value in fields)
class SendConfirmationForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('send_confirmation'))
def __init__(self, *args, **kwargs):
super(SendConfirmationForm, self).__init__(*args, **kwargs)
if request.method == 'GET':
self.email.data = request.args.get('email', None)
def validate(self):
if not super(SendConfirmationForm, self).validate():
return False
if self.user.confirmed_at is not None:
self.email.errors.append(get_message('ALREADY_CONFIRMED')[0])
return False
return True
class ForgotPasswordForm(Form, UserEmailFormMixin):
"""The default forgot password form"""
submit = SubmitField(get_form_field_label('recover_password'))
def validate(self):
if not super(ForgotPasswordForm, self).validate():
return False
if requires_confirmation(self.user):
self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
return False
return True
class PasswordlessLoginForm(Form, UserEmailFormMixin):
"""The passwordless login form"""
submit = SubmitField(get_form_field_label('send_login_link'))
def __init__(self, *args, **kwargs):
super(PasswordlessLoginForm, self).__init__(*args, **kwargs)
def validate(self):
if not super(PasswordlessLoginForm, self).validate():
return False
if not self.user.is_active:
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class LoginForm(Form, NextFormMixin):
"""The default login form"""
email = StringField(get_form_field_label('email'),
validators=[Required(message='EMAIL_NOT_PROVIDED')])
password = PasswordField(get_form_field_label('password'),
validators=[password_required])
remember = BooleanField(get_form_field_label('remember_me'))
submit = SubmitField(get_form_field_label('login'))
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
self.remember.default = config_value('DEFAULT_REMEMBER_ME')
if current_app.extensions['security'].recoverable and \
not self.password.description:
html = Markup('<a href="{url}">{message}</a>'.format(
url=url_for_security("forgot_password"),
message=get_message("FORGOT_PASSWORD")[0],
))
self.password.description = html
def validate(self):
if not super(LoginForm, self).validate():
return False
self.user = _datastore.get_user(self.email.data)
if self.user is None:
self.email.errors.append(get_message('USER_DOES_NOT_EXIST')[0])
return False
if not self.user.password:
self.password.errors.append(get_message('PASSWORD_NOT_SET')[0])
return False
if not verify_and_update_password(self.password.data, self.user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if requires_confirmation(self.user):
self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
return False
if not self.user.is_active:
self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
return False
return True
class ConfirmRegisterForm(Form, RegisterFormMixin,
UniqueEmailFormMixin, NewPasswordFormMixin):
pass
class RegisterForm(ConfirmRegisterForm, PasswordConfirmFormMixin,
NextFormMixin):
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
if not self.next.data:
self.next.data = request.args.get('next', '')
class ResetPasswordForm(Form, NewPasswordFormMixin, PasswordConfirmFormMixin):
"""The default reset password form"""
submit = SubmitField(get_form_field_label('reset_password'))
class ChangePasswordForm(Form, PasswordFormMixin):
"""The default change password form"""
new_password = PasswordField(
get_form_field_label('new_password'),
validators=[password_required, password_length])
new_password_confirm = PasswordField(
get_form_field_label('retype_password'),
validators=[EqualTo('new_password',
message='RETYPE_PASSWORD_MISMATCH'),
password_required])
submit = SubmitField(get_form_field_label('change_password'))
def validate(self):
if not super(ChangePasswordForm, self).validate():
return False
if not verify_and_update_password(self.password.data, current_user):
self.password.errors.append(get_message('INVALID_PASSWORD')[0])
return False
if self.password.data == self.new_password.data:
self.password.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
return False
return True
``` |
[
{
"content": "```python\nfrom __future__ import with_statement\n\nfrom fudge import Fake, patched_context, with_fakes\nimport unittest\nfrom nose.tools import raises, ok_\nimport random\nimport sys\n\nimport fabric\nfrom fabric.tasks import WrappedCallableTask, execute, Task, get_task_details\nfrom fabric.main ... | [
{
"content": "<|memory_start|>```python\nfrom __future__ import with_statement\n\nfrom fudge import Fake, patched_context, with_fakes\nimport unittest\nfrom nose.tools import raises, ok_\nimport random\nimport sys\n\nimport fabric\nfrom fabric.tasks import WrappedCallableTask, execute, Task, get_task_details\nf... | ```python
from __future__ import with_statement
from fudge import Fake, patched_context, with_fakes
import unittest
from nose.tools import raises, ok_
import random
import sys
import fabric
from fabric.tasks import WrappedCallableTask, execute, Task, get_task_details
from fabric.main import display_command
from fabric.api import run, env, settings, hosts, roles, hide, parallel, task, runs_once, serial
from fabric.exceptions import NetworkError
from mock_streams import mock_streams
from utils import eq_, FabricTest, aborts, support
from server import server
def test_base_task_provides_undefined_name():
task = Task()
eq_("undefined", task.name)
@raises(NotImplementedError)
def test_base_task_raises_exception_on_call_to_run():
task = Task()
task.run()
class TestWrappedCallableTask(unittest.TestCase):
def test_passes_unused_args_to_parent(self):
args = [i for i in range(random.randint(1, 10))]
def foo(): pass
try:
WrappedCallableTask(foo, *args)
except TypeError:
msg = "__init__ raised a TypeError, meaning args weren't handled"
self.fail(msg)
def test_passes_unused_kwargs_to_parent(self):
random_range = range(random.randint(1, 10))
kwargs = dict([("key_%s" % i, i) for i in random_range])
def foo(): pass
try:
WrappedCallableTask(foo, **kwargs)
except TypeError:
self.fail(
"__init__ raised a TypeError, meaning kwargs weren't handled")
def test_allows_any_number_of_args(self):
args = [i for i in range(random.randint(0, 10))]
def foo(): pass
WrappedCallableTask(foo, *args)
def test_allows_any_number_of_kwargs(self):
kwargs = dict([("key%d" % i, i) for i in range(random.randint(0, 10))])
def foo(): pass
WrappedCallableTask(foo, **kwargs)
def test_run_is_wrapped_callable(self):
def foo(): pass
task = WrappedCallableTask(foo)
eq_(task.wrapped, foo)
def test_name_is_the_name_of_the_wrapped_callable(self):
def foo(): pass
foo.__name__ = "random_name_%d" % random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(task.name, foo.__name__)
def test_name_can_be_overridden(self):
def foo(): pass
eq_(WrappedCallableTask(foo).name, 'foo')
eq_(WrappedCallableTask(foo, name='notfoo').name, 'notfoo')
def test_reads_double_under_doc_from_callable(self):
def foo(): pass
foo.__doc__ = "Some random __doc__: %d" % random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(task.__doc__, foo.__doc__)
def test_dispatches_to_wrapped_callable_on_run(self):
random_value = "some random value %d" % random.randint(1000, 2000)
def foo(): return random_value
task = WrappedCallableTask(foo)
eq_(random_value, task())
def test_passes_all_regular_args_to_run(self):
def foo(*args): return args
random_args = tuple(
[random.randint(1000, 2000) for i in range(random.randint(1, 5))]
)
task = WrappedCallableTask(foo)
eq_(random_args, task(*random_args))
def test_passes_all_keyword_args_to_run(self):
def foo(**kwargs): return kwargs
random_kwargs = {}
for i in range(random.randint(1, 5)):
random_key = ("foo", "bar", "baz", "foobar", "barfoo")[i]
random_kwargs[random_key] = random.randint(1000, 2000)
task = WrappedCallableTask(foo)
eq_(random_kwargs, task(**random_kwargs))
def test_calling_the_object_is_the_same_as_run(self):
random_return = random.randint(1000, 2000)
def foo(): return random_return
task = WrappedCallableTask(foo)
eq_(task(), task.run())
class TestTask(unittest.TestCase):
def test_takes_an_alias_kwarg_and_wraps_it_in_aliases_list(self):
random_alias = "alias_%d" % random.randint(100, 200)
task = Task(alias=random_alias)
self.assertTrue(random_alias in task.aliases)
def test_aliases_are_set_based_on_provided_aliases(self):
aliases = ["a_%d" % i for i in range(random.randint(1, 10))]
task = Task(aliases=aliases)
self.assertTrue(all([a in task.aliases for a in aliases]))
def test_aliases_are_None_by_default(self):
task = Task()
self.assertTrue(task.aliases is None)
# Reminder: decorator syntax, e.g.:
# @foo
# def bar():...
#
# is semantically equivalent to:
# def bar():...
# bar = foo(bar)
#
# this simplifies testing :)
def test_decorator_incompatibility_on_task():
from fabric.decorators import task, hosts, runs_once, roles
def foo(): return "foo"
foo = task(foo)
# since we aren't setting foo to be the newly decorated thing, its cool
hosts('me@localhost')(foo)
runs_once(foo)
roles('www')(foo)
def test_decorator_closure_hiding():
"""
@task should not accidentally destroy decorated attributes from @hosts/etc
"""
from fabric.decorators import task, hosts
def foo():
print(env.host_string)
foo = task(hosts("me@localhost")(foo))
eq_(["me@localhost"], foo.hosts)
#
# execute()
#
def dict_contains(superset, subset):
    """
    Assert that all key/val pairs in dict 'subset' also exist in 'superset'.

    Raises AssertionError (via nose's ok_/eq_) on the first missing key or
    mismatched value.
    """
    # Fix: .items() instead of .iteritems() — .iteritems() was removed in
    # Python 3; .items() behaves correctly on both Python 2 and 3.
    for key, value in subset.items():
        ok_(key in superset)
        eq_(superset[key], value)
class TestExecute(FabricTest):
    """Tests for fabric.tasks.execute(): task lookup, host-list derivation,
    env.command handling, printed output, and return-value semantics."""

    @with_fakes
    def test_calls_task_function_objects(self):
        """
        should execute the passed-in function object
        """
        execute(Fake(callable=True, expect_call=True))

    @with_fakes
    def test_should_look_up_task_name(self):
        """
        should also be able to handle task name strings
        """
        name = 'task1'
        commands = {name: Fake(callable=True, expect_call=True)}
        with patched_context(fabric.state, 'commands', commands):
            execute(name)

    @with_fakes
    def test_should_handle_name_of_Task_object(self):
        """
        handle corner case of Task object referred to by name
        """
        name = 'task2'
        class MyTask(Task):
            run = Fake(callable=True, expect_call=True)
        mytask = MyTask()
        mytask.name = name
        commands = {name: mytask}
        with patched_context(fabric.state, 'commands', commands):
            execute(name)

    @aborts
    def test_should_abort_if_task_name_not_found(self):
        """
        should abort if given an invalid task name
        """
        execute('thisisnotavalidtaskname')

    def test_should_not_abort_if_task_name_not_found_with_skip(self):
        """
        should not abort if given an invalid task name
        and skip_unknown_tasks in env
        """
        env.skip_unknown_tasks = True
        execute('thisisnotavalidtaskname')
        # Clean up so the flag does not leak into other tests.
        del env['skip_unknown_tasks']

    @with_fakes
    def test_should_pass_through_args_kwargs(self):
        """
        should pass in any additional args, kwargs to the given task.
        """
        task = (
            Fake(callable=True, expect_call=True)
            .with_args('foo', biz='baz')
        )
        execute(task, 'foo', biz='baz')

    @with_fakes
    def test_should_honor_hosts_kwarg(self):
        """
        should use hosts kwarg to set run list
        """
        # Make two full copies of a host list
        hostlist = ['a', 'b', 'c']
        hosts = hostlist[:]
        # Side-effect which asserts the value of env.host_string when it runs
        def host_string():
            eq_(env.host_string, hostlist.pop(0))
        task = Fake(callable=True, expect_call=True).calls(host_string)
        with hide('everything'):
            execute(task, hosts=hosts)

    def test_should_honor_hosts_decorator(self):
        """
        should honor @hosts on passed-in task objects
        """
        # Make two full copies of a host list
        hostlist = ['a', 'b', 'c']
        @hosts(*hostlist[:])
        def task():
            eq_(env.host_string, hostlist.pop(0))
        with hide('running'):
            execute(task)

    def test_should_honor_roles_decorator(self):
        """
        should honor @roles on passed-in task objects
        """
        # Make two full copies of a host list
        roledefs = {'role1': ['a', 'b', 'c']}
        role_copy = roledefs['role1'][:]
        @roles('role1')
        def task():
            eq_(env.host_string, role_copy.pop(0))
        with settings(hide('running'), roledefs=roledefs):
            execute(task)

    @with_fakes
    def test_should_set_env_command_to_string_arg(self):
        """
        should set env.command to any string arg, if given
        """
        name = "foo"
        def command():
            eq_(env.command, name)
        task = Fake(callable=True, expect_call=True).calls(command)
        with patched_context(fabric.state, 'commands', {name: task}):
            execute(name)

    @with_fakes
    def test_should_set_env_command_to_name_attr(self):
        """
        should set env.command to TaskSubclass.name if possible
        """
        name = "foo"
        def command():
            eq_(env.command, name)
        task = (
            Fake(callable=True, expect_call=True)
            .has_attr(name=name)
            .calls(command)
        )
        execute(task)

    @with_fakes
    def test_should_set_all_hosts(self):
        """
        should set env.all_hosts to its derived host list
        """
        hosts = ['a', 'b']
        roledefs = {'r1': ['c', 'd']}
        roles = ['r1']
        exclude_hosts = ['a']
        def command():
            # 'a' is excluded; roles contribute 'c' and 'd'.
            eq_(set(env.all_hosts), set(['b', 'c', 'd']))
        task = Fake(callable=True, expect_call=True).calls(command)
        with settings(hide('everything'), roledefs=roledefs):
            execute(
                task, hosts=hosts, roles=roles, exclude_hosts=exclude_hosts
            )

    @mock_streams('stdout')
    def test_should_print_executing_line_per_host(self):
        """
        should print "Executing" line once per host
        """
        def task():
            pass
        execute(task, hosts=['host1', 'host2'])
        eq_(sys.stdout.getvalue(), """[host1] Executing task 'task'
[host2] Executing task 'task'
""")

    @mock_streams('stdout')
    def test_should_not_print_executing_line_for_singletons(self):
        """
        should not print "Executing" line for non-networked tasks
        """
        def task():
            pass
        with settings(hosts=[]):  # protect against really odd test bleed :(
            execute(task)
        eq_(sys.stdout.getvalue(), "")

    def test_should_return_dict_for_base_case(self):
        """
        Non-network-related tasks should return a dict w/ special key
        """
        def task():
            return "foo"
        eq_(execute(task), {'<local-only>': 'foo'})

    @server(port=2200)
    @server(port=2201)
    def test_should_return_dict_for_serial_use_case(self):
        """
        Networked but serial tasks should return per-host-string dict
        """
        ports = [2200, 2201]
        # NOTE(review): map() returns a list on Python 2; under Python 3 this
        # would hand execute() an iterator — confirm intended interpreter.
        hosts = map(lambda x: '127.0.0.1:%s' % x, ports)
        def task():
            run("ls /simple")
            return "foo"
        with hide('everything'):
            eq_(execute(task, hosts=hosts), {
                '127.0.0.1:2200': 'foo',
                '127.0.0.1:2201': 'foo'
            })

    @server()
    def test_should_preserve_None_for_non_returning_tasks(self):
        """
        Tasks which don't return anything should still show up in the dict
        """
        def local_task():
            pass
        def remote_task():
            with hide('everything'):
                run("ls /simple")
        eq_(execute(local_task), {'<local-only>': None})
        with hide('everything'):
            eq_(
                execute(remote_task, hosts=[env.host_string]),
                {env.host_string: None}
            )

    def test_should_use_sentinel_for_tasks_that_errored(self):
        """
        Tasks which errored but didn't abort should contain an eg NetworkError
        """
        def task():
            run("whoops")
        host_string = 'localhost:1234'
        with settings(hide('everything'), skip_bad_hosts=True):
            retval = execute(task, hosts=[host_string])
        assert isinstance(retval[host_string], NetworkError)

    @server(port=2200)
    @server(port=2201)
    def test_parallel_return_values(self):
        """
        Parallel mode should still return values as in serial mode
        """
        @parallel
        @hosts('127.0.0.1:2200', '127.0.0.1:2201')
        def task():
            run("ls /simple")
            return env.host_string.split(':')[1]
        with hide('everything'):
            retval = execute(task)
        eq_(retval, {'127.0.0.1:2200': '2200', '127.0.0.1:2201': '2201'})

    @with_fakes
    def test_should_work_with_Task_subclasses(self):
        """
        should work for Task subclasses, not just WrappedCallableTask
        """
        class MyTask(Task):
            name = "mytask"
            run = Fake(callable=True, expect_call=True)
        mytask = MyTask()
        execute(mytask)

    @server(port=2200)
    @server(port=2201)
    def test_nested_execution_with_explicit_ports(self):
        """
        nested executions should work with defined ports
        """
        def expect_host_string_port():
            eq_(env.port, '2201')
            return "bar"
        def expect_env_port():
            eq_(env.port, '2202')
        def expect_per_host_config_port():
            eq_(env.port, '664')
            run = execute(expect_default_config_port, hosts=['some_host'])
            return run['some_host']
        def expect_default_config_port():
            # uses `Host *` in ssh_config
            eq_(env.port, '666')
            return "bar"
        def main_task():
            eq_(env.port, '2200')
            execute(expect_host_string_port, hosts=['localhost:2201'])
            with settings(port='2202'):
                execute(expect_env_port, hosts=['localhost'])
            with settings(
                use_ssh_config=True,
                ssh_config_path=support("ssh_config")
            ):
                run = execute(expect_per_host_config_port, hosts='myhost')
                return run['myhost']
        run = execute(main_task, hosts=['localhost:2200'])
        eq_(run['localhost:2200'], 'bar')
class TestExecuteEnvInteractions(FabricTest):
    """Verify that execute() isolates its internal env mutations while still
    letting explicit, task-made env changes persist."""

    def set_network(self):
        # Don't update env.host/host_string/etc
        # (overrides FabricTest's setup so env starts pristine for these tests).
        pass

    @server(port=2200)
    @server(port=2201)
    def test_should_not_mutate_its_own_env_vars(self):
        """
        internal env changes should not bleed out, but task env changes should
        """
        # Task that uses a handful of features which involve env vars
        @parallel
        @hosts('username@127.0.0.1:2200', 'username@127.0.0.1:2201')
        def mytask():
            run("ls /simple")
        # Pre-assertions
        assertions = {
            'parallel': False,
            'all_hosts': [],
            'host': None,
            'hosts': [],
            'host_string': None
        }
        for key, value in assertions.items():
            eq_(env[key], value)
        # Run
        with hide('everything'):
            result = execute(mytask)
        eq_(len(result), 2)
        # Post-assertions: env must look exactly as it did before execute().
        for key, value in assertions.items():
            eq_(env[key], value)

    @server()
    def test_should_allow_task_to_modify_env_vars(self):
        @hosts('username@127.0.0.1:2200')
        def mytask():
            run("ls /simple")
            env.foo = "bar"
        with hide('everything'):
            execute(mytask)
        # The task's own assignment survives...
        eq_(env.foo, "bar")
        # ...but execute()'s internal host bookkeeping is rolled back.
        eq_(env.host_string, None)
class TestTaskDetails(unittest.TestCase):
    """Tests for task help output: __details__() / get_task_details() must
    render the docstring plus an 'Arguments:' line, for plain functions,
    decorated tasks, and Task subclasses."""

    def test_old_style_task_with_default_args(self):
        """
        __details__() should print docstr for old style task methods with default args
        """
        def task_old_style(arg1, arg2, arg3=None, arg4='yes'):
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: arg1, arg2, arg3=None, arg4='yes'",
            details)

    def test_old_style_task_without_default_args(self):
        """
        __details__() should print docstr for old style task methods without default args
        """
        def task_old_style(arg1, arg2):
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: arg1, arg2",
            details)

    def test_old_style_task_without_args(self):
        """
        __details__() should print docstr for old style task methods without args
        """
        def task_old_style():
            '''Docstring'''
        details = get_task_details(task_old_style)
        eq_("Docstring\n"
            "Arguments: ",
            details)

    def test_decorated_task(self):
        """
        __details__() should print docstr for method with any number and order of decorations
        """
        expected = "\n".join([
            "Docstring",
            "Arguments: arg1",
        ])
        @task
        def decorated_task(arg1):
            '''Docstring'''
        actual = decorated_task.__details__()
        eq_(expected, actual)
        @runs_once
        @task
        def decorated_task1(arg1):
            '''Docstring'''
        actual = decorated_task1.__details__()
        eq_(expected, actual)
        @runs_once
        @serial
        @task
        def decorated_task2(arg1):
            '''Docstring'''
        actual = decorated_task2.__details__()
        eq_(expected, actual)

    def test_subclassed_task(self):
        """
        __details__() should print docstr for subclassed task methods with args
        """
        class SpecificTask(Task):
            def run(self, arg1, arg2, arg3):
                '''Docstring'''
        eq_("Docstring\n"
            "Arguments: self, arg1, arg2, arg3",
            SpecificTask().__details__())

    @mock_streams('stdout')
    def test_multiline_docstring_indented_correctly(self):
        """
        display_command() should properly indent docstr for old style task methods
        """
        def mytask(arg1):
            """
            This is a multi line docstring.
            For reals.
            """
        try:
            with patched_context(fabric.state, 'commands', {'mytask': mytask}):
                display_command('mytask')
        except SystemExit:  # ugh
            pass
        # NOTE(review): this copy of the file appears to have lost leading
        # whitespace (and possibly blank lines) inside the expected-output
        # literal below — verify it against display_command()'s real output.
        eq_(
            sys.stdout.getvalue(),
            """Displaying detailed information for task 'mytask':
This is a multi line docstring.
For reals.
Arguments: arg1
"""
        )
``` |
[
{
"content": "Here is the snippet:\n```python\n#!/usr/bin/env python\n#\n# Copyright 2007 Doug Hellmann.\n#\n#\n# All Rights Reserved\n#\n# Permission to use, copy, modify, and distribute this software and\n# its documentation for any purpose and without fee is hereby\n# granted, provide... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# Copyright 2007 Doug Hellmann.\n#\n#\n# All Rights Reserved\n#\n# Permission to use, copy, modify, and distribute this software and\n# its documentation for any purpose and without fee is hereby\n# ... | ```python
#!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Splitting strings with shlex.
"""
__version__ = "$Id$"
#end_pymotw_header
import shlex

# Sample input with an embedded double-quoted section.
text = """This text has "quoted parts" inside it."""

# Python 2 print statements (this example predates the print() function).
print 'ORIGINAL:', repr(text)
print

print 'TOKENS:'
# shlex.split() applies shell-like lexing: the quoted section stays one token.
print shlex.split(text)
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python\n\nimport argparse\nimport numpy as np\nimport math\nimport sys\nimport pdb\nimport bisect\n\nap = argparse.ArgumentParser(description=\"Print gap support from output of PrintGaps.py.\")\nap.add_argument(\"table\", help=\"Input tabular f... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport argparse\nimport numpy as np\nimport math\nimport sys\nimport pdb\nimport bisect\n\nap = argparse.ArgumentParser(description=\"Print gap support from output of PrintGaps.py.\")\nap.add_argument(\"table\", help=\... | ```python
#!/usr/bin/env python
import argparse
import numpy as np
import math
import sys
import pdb
import bisect
ap = argparse.ArgumentParser(description="Print gap support from output of PrintGaps.py.")
ap.add_argument("table", help="Input tabular file")
ap.add_argument("out", help="Output file, stdout implies stdout")
ap.add_argument("--overlap", help="Required overlap consistency", type=float, default=0.60)
ap.add_argument("--minSupport", help="Min overlapping clusters", type=int, default=2)
args = ap.parse_args()
if (args.out == "stdout"):
outFile = sys.stdout
else:
outFile = open(args.out, 'w')
inFile = open(args.table, 'r')
prevChrom = ""
intv = ()
prevRead = ""
def Overlap(a, b, pct):
    """Return True if intervals a and b overlap by at least fraction `pct`.

    a, b -- (start, end) pairs; pct -- required ratio of overlap length to
    the combined span (union) of the two intervals.
    """
    # Disjoint intervals can never meet the overlap requirement.
    if (a[1] < b[0] or a[0] > b[1]):
        return False
    span = max(a[1], b[1]) - min(a[0], b[0])
    overlap = min(a[1], b[1]) - max(a[0], b[0])
    # Fix: the original returned None (implicitly) when the fraction was
    # below pct. None is falsy so boolean callers behaved identically, but
    # the contract is now an explicit bool.
    return float(overlap) / span >= pct
intv = None
prevChrom = ""
prevRead = ""
prevOp = ""
strings = []
names = []
tsds = []
for line in inFile:
vals = line.split()
if (len(vals) == 7):
vals.append(vals[6])
vals[7] = "notsd"
curChrom = vals[0]
try:
read = '/'.join(vals[7].split('/')[0:2])
except:
sys.stderr.write("Error joining %s\n" % vals)
raise
op = vals[3]
# (intvStart, intvEnd) = vals[3].split(',')
intvStart = int(vals[1])
intvEnd = int(vals[2])
curIntv = (int(intvStart), int(intvEnd))
# if (intv is not None):
# print str(intv) + " " + str(curIntv) + " " + str(Overlap(intv, curIntv, args.overlap))
if (intv is not None and Overlap(intv, curIntv, args.overlap) and curChrom == prevChrom and op == prevOp):
if (read != prevRead):
intv = (min(intv[0], curIntv[0]), max(intv[1], curIntv[1]))
support += 1
strings.append(vals[5])
tsds.append(vals[6])
names.append(vals[7])
else:
uniqueSupport = len(np.unique(names))
if (intv is not None and uniqueSupport >= args.minSupport):
meanLength = np.mean(np.asarray([len(seq) for seq in strings]))
outFile.write( intvChrom + "\t" + str(intv[0]) + "\t" + str(intv[1]) + "\t" + str(meanLength) + "\t" + str(uniqueSupport) + "\t" + intvOp + "\t" + ';'.join(strings) + '\t' + ';'.join(names) + '\t' + ';'.join(tsds) + "\n")
support = 1
intv = curIntv
intvOp = op
strings = [vals[5]]
tsds = [vals[6]]
names = [vals[7]]
intvChrom = curChrom
prevChrom = curChrom
prevRead = read
prevOp = op
if (outFile != sys.stdout):
outFile.close()
``` |
[
{
"content": "Here is the code block:\n```python\nimport sqlalchemy.types\nimport sqlalchemy.schema\nimport sqlalchemy.orm\n\nimport rod.model\n\n\nclass Lesson(rod.model.db.Model, rod.model.PersistentMixin):\n __tablename__ = 'lesson'\n\n id = sqlalchemy.schema.Column(sqlalchemy.types.Integer, primary_ke... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\nimport sqlalchemy.types\nimport sqlalchemy.schema\nimport sqlalchemy.orm\n\nimport rod.model\n\n\nclass Lesson(rod.model.db.Model, rod.model.PersistentMixin):\n __tablename__ = 'lesson'\n\n id = sqlalchemy.schema.Column(sqlalchemy.types.Int... | ```python
import sqlalchemy.types
import sqlalchemy.schema
import sqlalchemy.orm
import rod.model
class Lesson(rod.model.db.Model, rod.model.PersistentMixin):
    """A single lesson occurrence: its time, the teacher leading it, the
    group attending, and per-student attendance records."""
    __tablename__ = 'lesson'

    id = sqlalchemy.schema.Column(sqlalchemy.types.Integer, primary_key=True)
    # Scheduled date/time of the lesson (naive DateTime — timezone handling
    # not visible here; TODO confirm).
    time = sqlalchemy.schema.Column(sqlalchemy.types.DateTime)

    # FK to the staff member teaching this lesson; pairs with Staff.lessons.
    teacher_id = sqlalchemy.schema.Column(sqlalchemy.types.Integer,
                                          sqlalchemy.schema.ForeignKey(
                                              'staff.id',
                                              name='fk_lesson_teacher_id'
                                          ))
    teacher = sqlalchemy.orm.relationship(
        'Staff',
        back_populates='lessons'
    )

    # FK to the group taking this lesson; pairs with Group.lessons.
    group_id = sqlalchemy.schema.Column(sqlalchemy.types.Integer,
                                        sqlalchemy.schema.ForeignKey(
                                            'group.id',
                                            name='fk_lesson_group_id'
                                        ))
    group = sqlalchemy.orm.relationship(
        'Group',
        back_populates='lessons'
    )

    # Students attendance for this lesson (Attendance rows point back via
    # Attendance.lesson).
    attendance = sqlalchemy.orm.relationship(
        'Attendance',
        back_populates='lesson'
    )
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes,... | ```python
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.python_setup import PythonRepos
from pants.backend.python.python_setup import PythonSetup
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.tasks2.gather_sources import GatherSources
from pants.build_graph.resources import Resources
from pants_test.tasks.task_test_base import TaskTestBase
class GatherSourcesTest(TaskTestBase):
    """GatherSources task test: sources from python_library and resources
    targets must all be laid out inside the built source PEX."""

    @classmethod
    def task_type(cls):
        return GatherSources

    def test_gather_sources(self):
        filemap = {
            'src/python/foo.py': 'foo_py_content',
            'src/python/bar.py': 'bar_py_content',
            'src/python/baz.py': 'baz_py_content',
            'resources/qux/quux.txt': 'quux_txt_content',
        }
        for rel_path, content in filemap.items():
            self.create_file(rel_path, content)
        sources1 = self.make_target(spec='//:sources1_tgt', target_type=PythonLibrary,
                                    sources=['src/python/foo.py', 'src/python/bar.py'])
        sources2 = self.make_target(spec='//:sources2_tgt', target_type=PythonLibrary,
                                    sources=['src/python/baz.py'])
        resources = self.make_target(spec='//:resources_tgt', target_type=Resources,
                                     sources=['resources/qux/quux.txt'])
        pex = self._gather_sources([sources1, sources2, resources])
        # pex.cmdline()[1] is used below as the root directory of the pex
        # chroot mirroring the source tree — TODO confirm against pex API.
        pex_root = pex.cmdline()[1]
        for rel_path, expected_content in filemap.items():
            with open(os.path.join(pex_root, rel_path)) as infile:
                content = infile.read()
            self.assertEquals(expected_content, content)

    def _gather_sources(self, target_roots):
        """Run the task over `target_roots` and return the resulting PEX product."""
        context = self.context(target_roots=target_roots, for_subsystems=[PythonSetup, PythonRepos])
        # We must get an interpreter via the cache, instead of using PythonInterpreter.get() directly,
        # to ensure that the interpreter has setuptools and wheel support.
        interpreter = PythonInterpreter.get()
        interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
                                                   PythonRepos.global_instance(),
                                                   logger=context.log.debug)
        interpreters = interpreter_cache.setup(paths=[os.path.dirname(interpreter.binary)],
                                               filters=[str(interpreter.identity.requirement)])
        context.products.get_data(PythonInterpreter, lambda: interpreters[0])
        task = self.create_task(context)
        task.execute()
        return context.products.get_data(GatherSources.PYTHON_SOURCES)
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nfrom pydotorg import context_processors\nfrom django.test import TestCase\n\n\nclass MockRequest(object):\n def __init__(self, path):\n self.path = path\n super().__init__()\n\n\nclass TemplateProcessorsTestCase(TestCase):\n... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nfrom pydotorg import context_processors\nfrom django.test import TestCase\n\n\nclass MockRequest(object):\n def __init__(self, path):\n self.path = path\n super().__init__()\n\n\nclass TemplateProcessorsTestC... | ```python
from pydotorg import context_processors
from django.test import TestCase
class MockRequest(object):
    """Minimal stand-in for a request object: carries only a `path`."""

    def __init__(self, path):
        super().__init__()
        self.path = path
class TemplateProcessorsTestCase(TestCase):
    """Exercise context_processors.url_name on resolvable and bogus paths."""

    def test_url_name(self):
        # Top-level URL: empty namespace, bare URL name.
        self.assertEqual(
            {'URL_NAMESPACE': '', 'URL_NAME': 'inner'},
            context_processors.url_name(MockRequest(path='/inner/')))
        # Namespaced URL: namespace-qualified URL name.
        self.assertEqual(
            {'URL_NAMESPACE': 'events', 'URL_NAME': 'events:calendar_list'},
            context_processors.url_name(MockRequest(path='/events/calendars/')))
        # Unresolvable paths (including one with control characters) must
        # all come back as an empty context dict.
        for bogus_path in (
            '/getit-404/releases/3.3.3/not-an-actual-thing/',
            '/getit-404/releases/3.3.3/\r\n/',
            '/nothing/here/',
        ):
            self.assertEqual(
                {}, context_processors.url_name(MockRequest(path=bogus_path)))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nfrom enigma import eComponentScan, iDVBFrontend, eTimer\nfrom Components.NimManager import nimmanager as nimmgr\nfrom Tools.Transponder import getChannelNumber\n\nclass ServiceScan:\n\n\tIdle = 1\n\tRunning = 2\n\tDone = 3\n\tError = 4\n\tDoneP... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nfrom enigma import eComponentScan, iDVBFrontend, eTimer\nfrom Components.NimManager import nimmanager as nimmgr\nfrom Tools.Transponder import getChannelNumber\n\nclass ServiceScan:\n\n\tIdle = 1\n\tRunning = 2\n\tDone = 3\n\tEr... | ```python
from enigma import eComponentScan, iDVBFrontend, eTimer
from Components.NimManager import nimmanager as nimmgr
from Tools.Transponder import getChannelNumber
class ServiceScan:
Idle = 1
Running = 2
Done = 3
Error = 4
DonePartially = 5
Errors = {
0: _("error starting scanning"),
1: _("error while scanning"),
2: _("no resource manager"),
3: _("no channel list")
}
def scanStatusChanged(self):
if self.state == self.Running:
self.progressbar.setValue(self.scan.getProgress())
self.lcd_summary and self.lcd_summary.updateProgress(self.scan.getProgress())
if self.scan.isDone():
errcode = self.scan.getError()
if errcode == 0:
self.state = self.DonePartially
self.servicelist.listAll()
else:
self.state = self.Error
self.errorcode = errcode
self.network.setText("")
self.transponder.setText("")
else:
result = self.foundServices + self.scan.getNumServices()
percentage = self.scan.getProgress()
if percentage > 99:
percentage = 99
#TRANSLATORS: The stb is performing a channel scan, progress percentage is printed in '%d' (and '%%' will show a single '%' symbol)
message = ngettext("Scanning - %d%% completed", "Scanning - %d%% completed", percentage) % percentage
message += ", "
#TRANSLATORS: Intermediate scanning result, '%d' channel(s) have been found so far
message += ngettext("%d channel found", "%d channels found", result) % result
self.text.setText(message)
transponder = self.scan.getCurrentTransponder()
network = ""
tp_text = ""
if transponder:
tp_type = transponder.getSystem()
if tp_type == iDVBFrontend.feSatellite:
network = _("Satellite")
tp = transponder.getDVBS()
orb_pos = tp.orbital_position
try:
sat_name = str(nimmgr.getSatDescription(orb_pos))
except KeyError:
sat_name = ""
if orb_pos > 1800: # west
orb_pos = 3600 - orb_pos
h = _("W")
else:
h = _("E")
if ("%d.%d" % (orb_pos/10, orb_pos%10)) in sat_name:
network = sat_name
else:
network = ("%s %d.%d %s") % (sat_name, orb_pos / 10, orb_pos % 10, h)
tp_text = { tp.System_DVB_S : "DVB-S", tp.System_DVB_S2 : "DVB-S2" }.get(tp.system, "")
if tp_text == "DVB-S2":
tp_text = ("%s %s") % ( tp_text,
{ tp.Modulation_Auto : "Auto", tp.Modulation_QPSK : "QPSK",
tp.Modulation_8PSK : "8PSK", tp.Modulation_QAM16 : "QAM16",
tp.Modulation_16APSK : "16APSK", tp.Modulation_32APSK : "32APSK" }.get(tp.modulation, ""))
tp_text = ("%s %d%c / %d / %s") % ( tp_text, tp.frequency/1000,
{ tp.Polarisation_Horizontal : 'H', tp.Polarisation_Vertical : 'V', tp.Polarisation_CircularLeft : 'L',
tp.Polarisation_CircularRight : 'R' }.get(tp.polarisation, ' '),
tp.symbol_rate/1000,
{ tp.FEC_Auto : "AUTO", tp.FEC_1_2 : "1/2", tp.FEC_2_3 : "2/3",
tp.FEC_3_4 : "3/4", tp.FEC_5_6 : "5/6", tp.FEC_7_8 : "7/8",
tp.FEC_8_9 : "8/9", tp.FEC_3_5 : "3/5", tp.FEC_4_5 : "4/5",
tp.FEC_9_10 : "9/10", tp.FEC_None : "NONE" }.get(tp.fec, ""))
if tp.is_id > -1 and tp.system == tp.System_DVB_S2:
tp_text = ("%s IS %d") % (tp_text, tp.is_id)
elif tp_type == iDVBFrontend.feCable:
network = _("Cable")
tp = transponder.getDVBC()
tp_text = ("DVB-C/C2 %s %d MHz / SR:%d / FEC:%s") %( { tp.Modulation_Auto : "AUTO",
tp.Modulation_QAM16 : "QAM16", tp.Modulation_QAM32 : "QAM32",
tp.Modulation_QAM64 : "QAM64", tp.Modulation_QAM128 : "QAM128",
tp.Modulation_QAM256 : "QAM256" }.get(tp.modulation, ""),
tp.frequency/1000,
tp.symbol_rate/1000,
{ tp.FEC_Auto : "AUTO", tp.FEC_1_2 : "1/2", tp.FEC_2_3 : "2/3",
tp.FEC_3_4 : "3/4", tp.FEC_5_6 : "5/6", tp.FEC_7_8 : "7/8",
tp.FEC_8_9 : "8/9", tp.FEC_3_5 : "3/5", tp.FEC_4_5 : "4/5",
tp.FEC_9_10 : "9/10", tp.FEC_6_7 : "6/7", tp.FEC_None : "NONE" }.get(tp.fec_inner, ""))
elif tp_type == iDVBFrontend.feTerrestrial:
network = _("Terrestrial")
tp = transponder.getDVBT()
channel = getChannelNumber(tp.frequency, self.scanList[self.run]["feid"])
if channel:
channel = _("CH") + "%s " % channel
freqMHz = "%0.1f MHz" % (tp.frequency/1000000.)
tp_text = ("%s %s %s %s") %(
{
tp.System_DVB_T_T2 : "DVB-T/T2",
tp.System_DVB_T : "DVB-T",
tp.System_DVB_T2 : "DVB-T2"
}.get(tp.system, ""),
{
tp.Modulation_QPSK : "QPSK",
tp.Modulation_QAM16 : "QAM16", tp.Modulation_QAM64 : "QAM64",
tp.Modulation_Auto : "AUTO", tp.Modulation_QAM256 : "QAM256"
}.get(tp.modulation, ""),
"%s%s" % (channel, freqMHz.replace(".0","")),
{
tp.Bandwidth_8MHz : "Bw 8MHz", tp.Bandwidth_7MHz : "Bw 7MHz", tp.Bandwidth_6MHz : "Bw 6MHz",
tp.Bandwidth_Auto : "Bw Auto", tp.Bandwidth_5MHz : "Bw 5MHz",
tp.Bandwidth_1_712MHz : "Bw 1.712MHz", tp.Bandwidth_10MHz : "Bw 10MHz"
}.get(tp.bandwidth, ""))
elif tp_type == iDVBFrontend.feATSC:
network = _("ATSC")
tp = transponder.getATSC()
freqMHz = "%0.1f MHz" % (tp.frequency/1000000.)
tp_text = ("%s %s %s %s") % (
{
tp.System_ATSC : _("ATSC"),
tp.System_DVB_C_ANNEX_B : _("DVB-C ANNEX B")
}.get(tp.system, ""),
{
tp.Modulation_Auto : _("Auto"),
tp.Modulation_QAM16 : "QAM16",
tp.Modulation_QAM32 : "QAM32",
tp.Modulation_QAM64 : "QAM64",
tp.Modulation_QAM128 : "QAM128",
tp.Modulation_QAM256 : "QAM256",
tp.Modulation_VSB_8 : "8VSB",
tp.Modulation_VSB_16 : "16VSB"
}.get(tp.modulation, ""),
freqMHz.replace(".0",""),
{
tp.Inversion_Off : _("Off"),
tp.Inversion_On :_("On"),
tp.Inversion_Unknown : _("Auto")
}.get(tp.inversion, ""))
else:
print "unknown transponder type in scanStatusChanged"
self.network.setText(network)
self.transponder.setText(tp_text)
if self.state == self.DonePartially:
self.foundServices += self.scan.getNumServices()
self.text.setText(ngettext("Scanning completed, %d channel found", "Scanning completed, %d channels found", self.foundServices) % self.foundServices)
if self.state == self.Error:
self.text.setText(_("ERROR - failed to scan (%s)!") % (self.Errors[self.errorcode]) )
if self.state == self.DonePartially or self.state == self.Error:
self.delaytimer.start(100, True)
def __init__(self, progressbar, text, servicelist, passNumber, scanList, network, transponder, frontendInfo, lcd_summary):
self.foundServices = 0
self.progressbar = progressbar
self.text = text
self.servicelist = servicelist
self.passNumber = passNumber
self.scanList = scanList
self.frontendInfo = frontendInfo
self.transponder = transponder
self.network = network
self.run = 0
self.lcd_summary = lcd_summary
self.scan = None
self.delaytimer = eTimer()
self.delaytimer.callback.append(self.execEnd)
def doRun(self):
self.scan = eComponentScan()
self.frontendInfo.frontend_source = lambda : self.scan.getFrontend()
self.feid = self.scanList[self.run]["feid"]
self.flags = self.scanList[self.run]["flags"]
self.networkid = 0
if "networkid" in self.scanList[self.run]:
self.networkid = self.scanList[self.run]["networkid"]
self.state = self.Idle
self.scanStatusChanged()
for x in self.scanList[self.run]["transponders"]:
self.scan.addInitial(x)
def updatePass(self):
size = len(self.scanList)
if size > 1:
txt = "%s %s/%s (%s)" % (_("pass"), self.run + 1, size, nimmgr.getNim(self.scanList[self.run]["feid"]).slot_name)
self.passNumber.setText(txt)
def execBegin(self):
self.doRun()
self.updatePass()
self.scan.statusChanged.get().append(self.scanStatusChanged)
self.scan.newService.get().append(self.newService)
self.servicelist.clear()
self.state = self.Running
err = self.scan.start(self.feid, self.flags, self.networkid)
self.frontendInfo.updateFrontendData()
if err:
self.state = self.Error
self.errorcode = 0
self.scanStatusChanged()
def execEnd(self):
if self.scan is None:
if not self.isDone():
print "*** warning *** scan was not finished!"
return
self.scan.statusChanged.get().remove(self.scanStatusChanged)
self.scan.newService.get().remove(self.newService)
self.scan = None
if self.run != len(self.scanList) - 1:
self.run += 1
self.execBegin()
else:
self.state = self.Done
def isDone(self):
return self.state == self.Done or self.state == self.Error
def newService(self):
newServiceName = self.scan.getLastServiceName()
newServiceRef = self.scan.getLastServiceRef()
self.servicelist.addItem((newServiceName, newServiceRef))
self.lcd_summary and self.lcd_summary.updateService(newServiceName)
def destroy(self):
self.state = self.Idle
if self.scan is not None:
self.scan.statusChanged.get().remove(self.scanStatusChanged)
self.scan.newService.get().remove(self.newService)
self.scan = None
``` |
[
{
"content": "Here is a code snippet:\n```python\n# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the Lic... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# (c) 2015, Ansible Inc,\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either vers... | ```python
# (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.module_utils.network_common import load_provider
from imp import find_module, load_module
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
    """Platform-agnostic network action handler.

    Starts a persistent network connection (netconf or network_cli) for the
    target network OS, then delegates to the OS-specific implementation
    module (e.g. ``net_interface`` -> ``junos_interface``).
    """

    def run(self, tmp=None, task_vars=None):
        """Set up the connection and execute the implementation module.

        Returns the result dict from the implementation module, augmented
        with the discovered ``network_os`` fact. Fails early unless the
        task uses ``connection=local``, since the persistent connection is
        started from this action handler.
        """
        if self._play_context.connection != 'local':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        play_context = copy.deepcopy(self._play_context)
        play_context.network_os = self._get_network_os(task_vars)

        # we should be able to stream line this a bit by creating a common
        # provider argument spec in module_utils/network_common.py or another
        # option is that there isn't a need to push provider into the module
        # since the connection is started in the action handler.
        # NOTE(review): `imp` is deprecated since Python 3.4 (removed in
        # 3.12); importlib would be the modern replacement. Kept as-is to
        # avoid behavior changes in the module-loading path.
        f, p, d = find_module('ansible')
        f2, p2, d2 = find_module('module_utils', [p])
        f3, p3, d3 = find_module(play_context.network_os, [p2])
        module = load_module('ansible.module_utils.' + play_context.network_os, f3, p3, d3)

        self.provider = load_provider(module.get_provider_argspec(), self._task.args)

        # junos speaks netconf (default port 830); every other OS goes over
        # an SSH CLI session (default port 22).
        if play_context.network_os == 'junos':
            play_context.connection = 'netconf'
            play_context.port = int(self.provider['port'] or self._play_context.port or 830)
        else:
            play_context.connection = 'network_cli'
            play_context.port = int(self.provider['port'] or self._play_context.port or 22)

        # Provider values win; the play context acts as the fallback.
        play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
        play_context.remote_user = self.provider['username'] or self._play_context.connection_user
        play_context.password = self.provider['password'] or self._play_context.password
        play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
        play_context.timeout = int(self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
        if 'authorize' in self.provider:
            play_context.become = self.provider['authorize'] or False
            play_context.become_pass = self.provider['auth_pass']

        socket_path = self._start_connection(play_context)
        task_vars['ansible_socket'] = socket_path

        if 'fail_on_missing_module' not in self._task.args:
            self._task.args['fail_on_missing_module'] = False

        result = super(ActionModule, self).run(tmp, task_vars)

        module = self._get_implementation_module(play_context.network_os, self._task.action)
        if not module:
            if self._task.args['fail_on_missing_module']:
                result['failed'] = True
            else:
                result['failed'] = False
            result['msg'] = ('Could not find implementation module %s for %s' %
                             (self._task.action, play_context.network_os))
        else:
            new_module_args = self._task.args.copy()
            # perhaps delete the provider argument here as well since the
            # module code doesn't need the information, the connection is
            # already started
            if 'network_os' in new_module_args:
                del new_module_args['network_os']
            del new_module_args['fail_on_missing_module']

            display.vvvv('Running implementation module %s' % module)
            # BUGFIX: `async` became a reserved keyword in Python 3.7, so
            # `self._task.async` is a syntax error there. getattr() reads
            # the same attribute without tripping the parser.
            result.update(self._execute_module(module_name=module,
                          module_args=new_module_args, task_vars=task_vars,
                          wrap_async=getattr(self._task, 'async')))

            display.vvvv('Caching network OS %s in facts' % play_context.network_os)
            result['ansible_facts'] = {'network_os': play_context.network_os}

        return result

    def _start_connection(self, play_context):
        """Start (or attach to) the persistent connection.

        Returns the connection socket path on success, or a failure dict
        when no socket could be opened.
        """
        display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
        connection = self._shared_loader_obj.connection_loader.get('persistent',
                                                                   play_context, sys.stdin)
        socket_path = connection.run()
        display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
        if not socket_path:
            return {'failed': True,
                    'msg': 'unable to open shell. Please see: ' +
                           'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

        # make sure we are in the right cli context which should be
        # enable mode and not config module
        rc, out, err = connection.exec_command('prompt()')
        if str(out).strip().endswith(')#'):
            display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
            connection.exec_command('exit')

        if self._play_context.become_method == 'enable':
            self._play_context.become = False
            self._play_context.become_method = None

        return socket_path

    def _get_network_os(self, task_vars):
        """Resolve the network OS from task args, inventory, or facts.

        Returns None when nothing declares it (left for future discovery).
        """
        if ('network_os' in self._task.args and self._task.args['network_os']):
            display.vvvv('Getting network OS from task argument')
            network_os = self._task.args['network_os']
        elif (self._play_context.network_os):
            display.vvvv('Getting network OS from inventory')
            network_os = self._play_context.network_os
        elif ('network_os' in task_vars['ansible_facts'] and
                task_vars['ansible_facts']['network_os']):
            display.vvvv('Getting network OS from fact')
            network_os = task_vars['ansible_facts']['network_os']
        else:
            # this will be replaced by the call to get_capabilities() on the
            # connection
            display.vvvv('Getting network OS from net discovery')
            network_os = None

        return network_os

    def _get_implementation_module(self, network_os, platform_agnostic_module):
        """Map a platform-agnostic module name to its OS-specific one.

        E.g. ('ios', 'net_banner') -> 'ios_banner'. Returns None when no
        such module is known to the module loader.
        """
        implementation_module = network_os + '_' + platform_agnostic_module.partition('_')[2]
        if implementation_module not in self._shared_loader_obj.module_loader:
            implementation_module = None
        return implementation_module
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom .base import AuthenticationBase\n\n\nclass Delegated(AuthenticationBase):\n \"\"\"Delegated authentication endpoints.\n\n Args:\n domain (str): Your auth0 domain (e.g: username.auth0.com)\n \"\"\"\n\n def get_token(self, client_id, ... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom .base import AuthenticationBase\n\n\nclass Delegated(AuthenticationBase):\n \"\"\"Delegated authentication endpoints.\n\n Args:\n domain (str): Your auth0 domain (e.g: username.auth0.com)\n \"\"\"\n\n def get_token(s... | ```python
from .base import AuthenticationBase
class Delegated(AuthenticationBase):
    """Delegated authentication endpoints.

    Args:
        domain (str): Your auth0 domain (e.g: username.auth0.com)
    """

    def get_token(self, client_id, target, api_type, grant_type,
                  id_token=None, refresh_token=None, scope='openid'):
        """Obtain a delegation token.

        Exactly one of ``id_token`` or ``refresh_token`` must be provided.

        Raises:
            ValueError: if both, or neither, of id_token/refresh_token
                are given.
        """
        if id_token and refresh_token:
            # BUGFIX: the previous message read "can be None", which stated
            # the opposite of the enforced constraint (this branch fires
            # when both values are supplied).
            raise ValueError('Only one of id_token or refresh_token '
                             'can be supplied')

        data = {
            'client_id': client_id,
            'grant_type': grant_type,
            'target': target,
            'scope': scope,
            'api_type': api_type,
        }
        if id_token:
            data.update({'id_token': id_token})
        elif refresh_token:
            data.update({'refresh_token': refresh_token})
        else:
            raise ValueError('Either id_token or refresh_token must '
                             'have a value')

        return self.post('{}://{}/delegation'.format(self.protocol, self.domain), data=data)
``` |
[
{
"content": "```python\nfrom bson import json_util\nfrom flask import Blueprint, render_template, request\nfrom flask import Response\nfrom datetime import datetime\n\nfrom app import mongo\nfrom app import utils\nimport json\nmod_api = Blueprint('api', __name__, url_prefix='/api')\n\n\n@mod_api.route('/', met... | [
{
"content": "<|memory_start|>```python\nfrom bson import json_util\nfrom flask import Blueprint, render_template, request\nfrom flask import Response\nfrom datetime import datetime\n\nfrom app import mongo\nfrom app import utils\nimport json\nmod_api = Blueprint('api', __name__, url_prefix='/api')\n\n\n@mod_ap... | ```python
from bson import json_util
from flask import Blueprint, render_template, request
from flask import Response
from datetime import datetime
from app import mongo
from app import utils
import json
mod_api = Blueprint('api', __name__, url_prefix='/api')
@mod_api.route('/', methods=['GET'])
def index():
    """Render the App index page."""
    return render_template('mod_importer/index.html')
@mod_api.route('/search', methods=['POST'])
def search():
    """Aggregate every dashboard statistic for the posted filter params."""
    params = request.json

    # The 'date' value arrives as "MM-DD-YYYY---MM-DD-YYYY"; split it into
    # real datetime bounds for the downstream queries.
    if 'date' in params:
        from_str, to_str = params['date'].split('---')
        params['from_date'] = datetime.strptime(from_str, '%m-%d-%Y')
        params['to_date'] = datetime.strptime(to_str, '%m-%d-%Y')

    # Dict literals evaluate in order, so the utils calls run in the same
    # sequence as before.
    result = {
        'stats': utils.get_stats(params),
        'monthly-stats': utils.get_monthly_incidents_stats(params),
        'quarterly-stats': utils.get_quarterly_incidents_stats(params),
        'rank-stats': utils.get_rank_stats(params),
        'incident-stats': utils.get_incidents_stats(params),
        'violence-types': utils.get_violence_types(params),
        'daily-stats': utils.get_incident_types_by_time(params),
        'top-3': utils.get_top_3_stats(params),
        'map-victims-count': utils.get_map_victims_count(params),
        'census': utils.get_census_info(params),
        'raw-incident-stats': utils.get_raw_incidents(params),
        'rank-download-stats': utils.get_download_stats(params),
    }

    return Response(
        response=json_util.dumps(result),
        mimetype='application/json')
@mod_api.route('/get_total_victims_number/<string:type>/<string:date>/<string:violence_type>/<string:name>', methods=['GET'])
def get_victims(type, date=None, violence_type=None, name=None):
    "Get incidents number based on given params."
    # NOTE: parameter names mirror the URL converters above and are passed
    # by keyword by Flask, so they must not be renamed (even though `type`
    # shadows the builtin).
    if violence_type:
        violence_type = violence_type.replace('-', '/')
    if date:
        from_str, to_str = date.split('---')
        from_date = datetime.strptime(from_str, '%m-%d-%Y')
        to_date = datetime.strptime(to_str, '%m-%d-%Y')

    # Nationwide queries ('Bangladesh') skip the name/violence-type filters.
    if name != 'Bangladesh':
        match = {
            "$match": {
                type: {
                    "$nin": [""],
                    "$in": [name]
                },
                'violence_type': {
                    "$in": [str(violence_type)]
                },
                "incident_date": {"$gte": from_date, "$lte": to_date}
            }
        }
    else:
        match = {
            "$match": {
                type: {
                    "$nin": [""]
                },
                "incident_date": {"$gte": from_date, "$lte": to_date}
            }
        }

    # Division-level results are grouped by the district field; every other
    # level groups by its own field.
    source_field = '$district' if type == 'division' else '$' + type
    group = {
        "$group": {
            "_id": {type: source_field},
            "incidents": {"$sum": 1}
        }
    }

    sort = {"$sort": {"incidents": -1}}
    project = {
        "$project": {
            "_id": 0,
            type: "$_id." + type,
            "incidents": "$incidents"
        }
    }

    pipeline = [match, group, sort, project]
    result = mongo.db.mgr.aggregate(pipeline)
    return Response(
        response=json_util.dumps(result['result']),
        mimetype='application/json')
@mod_api.route('/<string:dataset>/get/violence-types', methods=['GET', 'POST'])
def get_violence_types(dataset):
    "Get all the violence types based on the given dataset."
    distinct_types = mongo.db[dataset].distinct('violence_type')
    return Response(
        response=json_util.dumps(distinct_types),
        mimetype='application/json')
@mod_api.route('/census/<string:name>/<int:level>', methods=['GET', 'POST'])
def get_census_info(name, level):
    "Get census info based on the given Division, District, Upazila."
    # Administrative level -> census document field.
    field_by_level = {0: 'division', 1: 'district', 2: 'upazila'}
    census_info = None
    field = field_by_level.get(level)
    if field is not None:
        census_info = mongo.db.census.find_one({field: name})
    # Unknown levels fall through with census_info = None, matching the
    # original if/elif chain.
    return Response(
        response=json_util.dumps(census_info),
        mimetype='application/json')
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\nfrom optparse import make_option\nfrom django.core.management.base import BaseCommand\nfrom django.core.exceptions import ObjectDoesNotExist\n\nclass Command(BaseCommand):\n \"\"\"\n Get/Delete list of contributo... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\nfrom optparse import make_option\nfrom django.core.management.base import BaseCommand\nfrom django.core.exceptions import ObjectDoesNotExist\n\nclass Command(BaseCommand):\n \"\"\"\n Get/Delete li... | ```python
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.exceptions import ObjectDoesNotExist
class Command(BaseCommand):
    """
    Get/Delete list of contributors, non-contributors and total users
    """
    option_list = BaseCommand.option_list + (
        make_option('-d', '--delete',
            action='store_true',
            default=False,
            help='Delete non contributing users'),
    )

    def handle(self, *args, **options):
        """Report contributor counts; optionally delete non-contributors.

        BUGFIX: the original used Python-2-only ``print`` statements, which
        are syntax errors on Python 3. All output now goes through the
        ``print()`` function with a single string argument, producing the
        same text on both interpreters.
        """
        from django.contrib.auth.models import User

        # (module_path, model_name) for every optional content app; apps
        # that are not installed are simply skipped. This replaces six
        # copy-pasted try/import/loop stanzas with one data-driven loop.
        sources = (
            ('tendenci.apps.directories.models', 'Directory'),
            ('tendenci.apps.articles.models', 'Article'),
            ('tendenci.apps.events.models', 'Event'),
            ('tendenci.apps.photos.models', 'Photo'),
            ('tendenci.apps.pages.models', 'Page'),
            ('tendenci.apps.news.models', 'News'),
        )

        contribs = []
        for module_path, model_name in sources:
            try:
                module = __import__(module_path, fromlist=[model_name])
            except ImportError:
                continue
            model = getattr(module, model_name)
            for obj in model.objects.all():
                contribs.append(obj.creator)
                contribs.append(obj.owner)

        contribs = list(set(contribs))  # remove duplicates
        slackers = User.objects.exclude(username__in=[c.username for c in contribs if c])

        print('contribs %d' % len(contribs))
        print('slackers %d' % slackers.count())
        print('everyone %d' % User.objects.count())
        # BUGFIX: corrected the typo'd help line ("fn" -> "flag",
        # "no contributors" -> "non-contributors").
        print('Pass the -d or --delete flag to delete non-contributors')

        if options['delete']:
            from django.db import connections, DEFAULT_DB_ALIAS, IntegrityError
            using = options.get('database', DEFAULT_DB_ALIAS)
            connection = connections[using]
            cursor = connection.cursor()
            # Temporarily disable FK checks so bulk deletion does not trip
            # over cross-table references (MySQL-specific statement).
            cursor.execute('SET FOREIGN_KEY_CHECKS=0;')
            for slacker in slackers:
                try:
                    print(slacker)
                    slacker.delete()
                except IntegrityError:
                    print('Integrity Error deleting %s' % slacker)
            cursor.execute('SET FOREIGN_KEY_CHECKS=1;')
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.