| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
from bisect import insort
from time import strftime, time, localtime, mktime
from enigma import eTimer
import datetime
class TimerEntry:
StateWaiting = 0
StatePrepared = 1
StateRunning = 2
StateEnded = 3
StateFailed = 4
def __init__(self, begin, end):
self.begin = begin
self.prepare_time = 20
self.end = end
self.state = 0
self.resetRepeated()
        #begindate = localtime(self.begin)
        #newdate = datetime.datetime(begindate.tm_year, begindate.tm_mon, begindate.tm_mday, 0, 0, 0)
self.repeatedbegindate = begin
self.backoff = 0
self.disabled = False
def resetState(self):
self.state = self.StateWaiting
self.cancelled = False
self.first_try_prepare = True
self.timeChanged()
def resetRepeated(self):
        self.repeated = 0
def setRepeated(self, day):
self.repeated |= (2 ** day)
def isRunning(self):
return self.state == self.StateRunning
def addOneDay(self, timedatestruct):
oldHour = timedatestruct.tm_hour
newdate = (datetime.datetime(timedatestruct.tm_year, timedatestruct.tm_mon, timedatestruct.tm_mday, timedatestruct.tm_hour, timedatestruct.tm_min, timedatestruct.tm_sec) + datetime.timedelta(days=1)).timetuple()
if localtime(mktime(newdate)).tm_hour != oldHour:
return (datetime.datetime(timedatestruct.tm_year, timedatestruct.tm_mon, timedatestruct.tm_mday, timedatestruct.tm_hour, timedatestruct.tm_min, timedatestruct.tm_sec) + datetime.timedelta(days=2)).timetuple()
return newdate
# update self.begin and self.end according to the self.repeated-flags
def processRepeated(self, findRunningEvent = True):
if (self.repeated != 0):
now = int(time()) + 1
#to avoid problems with daylight saving, we need to calculate with localtime, in struct_time representation
localrepeatedbegindate = localtime(self.repeatedbegindate)
localbegin = localtime(self.begin)
localend = localtime(self.end)
localnow = localtime(now)
day = []
flags = self.repeated
for x in (0, 1, 2, 3, 4, 5, 6):
if (flags & 1 == 1):
day.append(0)
else:
day.append(1)
flags = flags >> 1
            # advance day by day while the weekday is NOT in the list of repeated
            # days, OR the weekday IS in the list but the event lies in the past
            # (with findRunningEvent an event that is still running is accepted;
            # without it, anything that has already begun is skipped)
while ((day[localbegin.tm_wday] != 0) or (mktime(localrepeatedbegindate) > mktime(localbegin)) or
((day[localbegin.tm_wday] == 0) and ((findRunningEvent and localend < localnow) or ((not findRunningEvent) and localbegin < localnow)))):
localbegin = self.addOneDay(localbegin)
localend = self.addOneDay(localend)
#we now have a struct_time representation of begin and end in localtime, but we have to calculate back to (gmt) seconds since epoch
self.begin = int(mktime(localbegin))
self.end = int(mktime(localend))
if self.begin == self.end:
self.end += 1
self.timeChanged()
def __lt__(self, o):
return self.getNextActivation() < o.getNextActivation()
# must be overridden
def activate(self):
pass
# can be overridden
def timeChanged(self):
pass
# check if a timer entry must be skipped
def shouldSkip(self):
if self.disabled:
return True
return self.end <= time() and (self.state == TimerEntry.StateWaiting or self.state == TimerEntry.StateFailed)
def abort(self):
self.end = time()
# in case timer has not yet started, but gets aborted (so it's preparing),
# set begin to now.
if self.begin > self.end:
self.begin = self.end
self.cancelled = True
# must be overridden!
    def getNextActivation(self):
pass
def disable(self):
self.disabled = True
def enable(self):
self.disabled = False
class Timer:
    # the time between "polls". We do this because
    # we want to account for time jumps etc.
    # of course if they occur <100s before starting,
    # it's not good. Thus, you have to re-poll when
    # you change the time.
    #
    # this is just in case. We don't want the timer
    # hanging. We use this "edge-triggered-polling-scheme"
    # anyway, so why not make it a bit more fool-proof?
MaxWaitTime = 100
def __init__(self):
self.timer_list = [ ]
self.processed_timers = [ ]
self.timer = eTimer()
self.timer.callback.append(self.calcNextActivation)
self.lastActivation = time()
self.calcNextActivation()
self.on_state_change = [ ]
def stateChanged(self, entry):
for f in self.on_state_change:
f(entry)
def cleanup(self):
self.processed_timers = [entry for entry in self.processed_timers if entry.disabled]
def cleanupDaily(self, days):
limit = time() - (days * 3600 * 24)
self.processed_timers = [entry for entry in self.processed_timers if (entry.disabled and entry.repeated) or (entry.end and (entry.end > limit))]
def addTimerEntry(self, entry, noRecalc=0):
entry.processRepeated()
        # when the timer has not yet started but has already passed,
        # don't go through the waiting/running/end states; sort it
        # right into the processed timers.
if entry.shouldSkip() or entry.state == TimerEntry.StateEnded or (entry.state == TimerEntry.StateWaiting and entry.disabled):
insort(self.processed_timers, entry)
entry.state = TimerEntry.StateEnded
else:
insort(self.timer_list, entry)
if not noRecalc:
self.calcNextActivation()
# small piece of example code to understand how to use record simulation
# if NavigationInstance.instance:
# lst = [ ]
# cnt = 0
# for timer in self.timer_list:
# print "timer", cnt
# cnt += 1
# if timer.state == 0: #waiting
# lst.append(NavigationInstance.instance.recordService(timer.service_ref))
# else:
# print "STATE: ", timer.state
#
# for rec in lst:
# if rec.start(True): #simulate
# print "FAILED!!!!!!!!!!!!"
# else:
# print "OK!!!!!!!!!!!!!!"
# NavigationInstance.instance.stopRecordService(rec)
# else:
# print "no NAV"
def setNextActivation(self, now, when):
delay = int((when - now) * 1000)
self.timer.start(delay, 1)
self.next = when
def calcNextActivation(self):
now = time()
if self.lastActivation > now:
print "[timer.py] timewarp - re-evaluating all processed timers."
tl = self.processed_timers
self.processed_timers = [ ]
for x in tl:
                # simulate a "waiting" state to give them a chance to re-occur
x.resetState()
self.addTimerEntry(x, noRecalc=1)
self.processActivation()
self.lastActivation = now
min = int(now) + self.MaxWaitTime
# calculate next activation point
if self.timer_list:
w = self.timer_list[0].getNextActivation()
if w < min:
min = w
if int(now) < 1072224000 and min > now + 5:
# system time has not yet been set (before 01.01.2004), keep a short poll interval
min = now + 5
self.setNextActivation(now, min)
def timeChanged(self, timer):
print "time changed"
timer.timeChanged()
if timer.state == TimerEntry.StateEnded:
self.processed_timers.remove(timer)
else:
try:
self.timer_list.remove(timer)
except:
print "[timer] Failed to remove, not in list"
return
# give the timer a chance to re-enqueue
if timer.state == TimerEntry.StateEnded:
timer.state = TimerEntry.StateWaiting
self.addTimerEntry(timer)
def doActivate(self, w):
self.timer_list.remove(w)
        # when activating a timer which has already passed,
        # simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = TimerEntry.StateEnded
else:
            # when activate() returns true, this means "accepted".
            # otherwise, the current state is kept.
            # the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
            # has this timer reached the last state?
if w.state < TimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = TimerEntry.StateWaiting
self.addTimerEntry(w)
else:
insort(self.processed_timers, w)
self.stateChanged(w)
def processActivation(self):
t = int(time()) + 1
# we keep on processing the first entry until it goes into the future.
while self.timer_list and self.timer_list[0].getNextActivation() < t:
self.doActivate(self.timer_list[0])
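# --- Illustrative sketch (not part of the original file) ---
# A minimal TimerEntry subclass wired into a Timer, assuming an enigma2
# runtime for eTimer; ExampleEntry and its trivial activation policy are
# hypothetical and only show how the hooks above fit together.
#
# class ExampleEntry(TimerEntry):
#     def getNextActivation(self):
#         # activate at begin while waiting, at end afterwards
#         return self.begin if self.state == self.StateWaiting else self.end
#     def activate(self):
#         return True  # accept every state transition
#
# timers = Timer()
# timers.addTimerEntry(ExampleEntry(time() + 60, time() + 120))
# # processActivation() is then driven by the eTimer poll loop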
| postla/e2-gui | timer.py | Python | gpl-2.0 | 8,176 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.orchestration import orchestration_service
from openstack import resource2 as resource
class SoftwareConfig(resource.Resource):
resource_key = 'software_config'
resources_key = 'software_configs'
base_path = '/software_configs'
service = orchestration_service.OrchestrationService()
# capabilities
allow_create = True
allow_list = True
allow_get = True
allow_delete = True
allow_update = False
# Properties
#: Configuration script or manifest that defines which configuration is
#: performed
config = resource.Body('config')
#: The date and time when the software config resource was created.
created_at = resource.Body('creation_time')
#: A string indicating the namespace used for grouping software configs.
group = resource.Body('group')
#: A list of schemas each representing an input this software config
#: expects.
inputs = resource.Body('inputs')
#: Name of the software config.
name = resource.Body('name')
    #: A string that contains options that are specific to the configuration
#: management tool that this resource uses.
options = resource.Body('options')
#: A list of schemas each representing an output this software config
#: produces.
outputs = resource.Body('outputs')
def create(self, session):
# This overrides the default behavior of resource creation because
# heat doesn't accept resource_key in its request.
return super(SoftwareConfig, self).create(session, prepend_key=False)
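# --- Illustrative sketch (not part of the original file) ---
# How this resource would typically be used; `session` is assumed to be
# an authenticated session obtained from the SDK elsewhere, and the
# field values are made up:
#
# config = SoftwareConfig(name='install-nginx', group='script',
#                         config='#!/bin/bash\napt-get install -y nginx')
# config.create(session)  # POSTs the body without the 'software_config' key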
| briancurtin/python-openstacksdk | openstack/orchestration/v1/software_config.py | Python | apache-2.0 | 2,111 |
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <aronparsons@gmail.com>
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
import xmlrpclib
from optparse import Option
from spacecmd.utils import *
def help_cryptokey_create(self):
print 'cryptokey_create: Create a cryptographic key'
print '''usage: cryptokey_create [options]
options:
-t GPG or SSL
-d DESCRIPTION
-f KEY_FILE'''
def do_cryptokey_create(self, args):
options = [Option('-t', '--type', action='store'),
Option('-d', '--description', action='store'),
Option('-f', '--file', action='store')]
(args, options) = parse_arguments(args, options)
if is_interactive(options):
options.type = prompt_user('GPG or SSL [G/S]:')
options.description = ''
while options.description == '':
options.description = prompt_user('Description:')
if self.user_confirm('Read an existing file [y/N]:',
nospacer=True, ignore_yes=True):
options.file = prompt_user('File:')
else:
options.contents = editor(delete=True)
else:
if not options.type:
logging.error('The key type is required')
return
if not options.description:
logging.error('A description is required')
return
if not options.file:
logging.error('A file containing the key is required')
return
# read the file the user specified
if options.file:
options.contents = read_file(options.file)
# translate the key type to what the server expects
if re.match('G', options.type, re.I):
options.type = 'GPG'
elif re.match('S', options.type, re.I):
options.type = 'SSL'
else:
logging.error('Invalid key type')
return
self.client.kickstart.keys.create(self.session,
options.description,
options.type,
options.contents)
####################
def help_cryptokey_delete(self):
print 'cryptokey_delete: Delete a cryptographic key'
print 'usage: cryptokey_delete NAME'
def complete_cryptokey_delete(self, text, line, beg, end):
if len(line.split(' ')) <= 2:
return tab_completer(self.do_cryptokey_list('', True),
text)
def do_cryptokey_delete(self, args):
(args, _options) = parse_arguments(args)
if not args:
self.help_cryptokey_delete()
return
# allow globbing of cryptokey names
keys = filter_results(self.do_cryptokey_list('', True), args)
logging.debug("cryptokey_delete called with args %s, keys=%s" %
(args, keys))
if not keys:
logging.error("No keys matched argument %s" % args)
return
# Print the keys prior to the confirmation
print '\n'.join(sorted(keys))
if self.user_confirm('Delete key(s) [y/N]:'):
for key in keys:
self.client.kickstart.keys.delete(self.session, key)
####################
def help_cryptokey_list(self):
print 'cryptokey_list: List all cryptographic keys (SSL, GPG)'
print 'usage: cryptokey_list'
def do_cryptokey_list(self, args, doreturn=False):
keys = self.client.kickstart.keys.listAllKeys(self.session)
keys = [k.get('description') for k in keys]
if doreturn:
return keys
else:
if keys:
print '\n'.join(sorted(keys))
####################
def help_cryptokey_details(self):
print 'cryptokey_details: Show the contents of a cryptographic key'
print 'usage: cryptokey_details KEY ...'
def complete_cryptokey_details(self, text, line, beg, end):
return tab_completer(self.do_cryptokey_list('', True), text)
def do_cryptokey_details(self, args):
(args, _options) = parse_arguments(args)
if not args:
self.help_cryptokey_details()
return
# allow globbing of cryptokey names
keys = filter_results(self.do_cryptokey_list('', True), args)
logging.debug("cryptokey_details called with args %s, keys=%s" %
(args, keys))
if not keys:
logging.error("No keys matched argument %s" % args)
return
add_separator = False
for key in keys:
try:
details = self.client.kickstart.keys.getDetails(self.session,
key)
except xmlrpclib.Fault:
logging.warning('%s is not a valid crypto key' % key)
return
if add_separator:
print self.SEPARATOR
add_separator = True
print 'Description: %s' % details.get('description')
print 'Type: %s' % details.get('type')
print
print details.get('content')
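# --- Illustrative sketch (not part of the original file) ---
# A typical round trip inside the spacecmd shell, assuming a key file at
# /tmp/key.gpg exists (names are made up):
#
#   spacecmd> cryptokey_create -t GPG -d 'build signing key' -f /tmp/key.gpg
#   spacecmd> cryptokey_list
#   spacecmd> cryptokey_details 'build signing key'
#   spacecmd> cryptokey_delete 'build signing key'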
| jdobes/spacewalk | spacecmd/src/lib/cryptokey.py | Python | gpl-2.0 | 5,706 |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
from neon.transforms.transform import Transform
class Identity(Transform):
"""
Identity activation function, :math:`f(x) = x`
"""
def __init__(self, name=None):
"""
Class constructor.
"""
super(Identity, self).__init__(name)
def __call__(self, x):
"""
Returns the input as output.
Arguments:
x (Tensor or optree): input value
Returns:
Tensor or optree: identical to input
"""
return x
def bprop(self, x):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Integer value 1.
"""
return 1
class Rectlin(Transform):
"""
    Rectified Linear Unit (ReLU) activation function, :math:`f(x) = \max(x, 0)`.
    Can optionally set a slope which will make this a Leaky ReLU.
"""
def __init__(self, slope=0, name=None):
"""
Class constructor.
Args:
slope (float, optional): Slope for negative domain. Defaults to 0.
name (string, optional): Name to assign this class instance.
"""
super(Rectlin, self).__init__(name)
self.slope = slope
self.is_mklop = True
def __call__(self, x, nglayer=None):
"""
        Returns the rectified linear (ReLU) activation
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: output activation
"""
return self.be.fprop_relu(nglayer, x, self.slope)
def bprop(self, x, nglayer=None, error=None, deltas=None):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Derivative
"""
return self.be.bprop_relu(nglayer, x, error, deltas, self.slope)
class Rectlinclip(Transform):
"""
    Clipped ReLU activation function.
    Computes the function f(x) = min(max(0, x) + slope * min(0, x), cutoff)
"""
def __init__(self, slope=0, name=None, xcut=20.0):
super(Rectlinclip, self).__init__(name)
self.xcut = xcut
self.slope = slope
def __call__(self, x):
return self.be.minimum(self.be.maximum(x, 0) + self.slope * self.be.minimum(x, 0),
self.xcut)
def bprop(self, x):
return (self.be.greater(x, 0) + self.slope * self.be.less(x, 0)) *\
self.be.greater(self.xcut, x)
class Explin(Transform):
"""
Exponential Linear activation function, :math:`f(x) = \max(x, 0) + \\alpha (e^{\min(x, 0)}-1)`
From: Clevert, Unterthiner and Hochreiter, ICLR 2016.
"""
def __init__(self, alpha=1.0, name=None):
"""
Class constructor.
Arguments:
alpha (float): weight of exponential factor for negative values (default: 1.0).
name (string, optional): Name (default: None)
"""
super(Explin, self).__init__(name)
self.alpha = alpha
def __call__(self, x):
"""
Returns the Exponential Linear activation
Arguments:
x (Tensor or optree): input value
Returns:
Tensor or optree: output activation
"""
return self.be.maximum(x, 0) + self.alpha * (self.be.exp(self.be.minimum(x, 0)) - 1)
def bprop(self, x):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Derivative
"""
return self.be.greater(x, 0) + self.be.minimum(x, 0) + self.alpha * self.be.less(x, 0)
class Normalizer(Transform):
"""
Normalize inputs by a fixed divisor.
"""
def __init__(self, name=None, divisor=128.):
"""
Class constructor.
Arguments:
divisor (float, optional): Normalization factor (default: 128)
name (string, optional): Name (default: None)
"""
super(Normalizer, self).__init__(name)
self.divisor = divisor
def __call__(self, x):
"""
Returns the normalized value.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output :math:`x / N`
"""
return x / self.divisor
def bprop(self, x):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Derivative
"""
return 1.0 / self.divisor
class Softmax(Transform):
"""
SoftMax activation function. Ensures that the activation output sums to 1.
"""
def __init__(self, axis=0, name=None, epsilon=2**-23):
"""
Class constructor.
Arguments:
name (string, optional): Name (default: none)
epsilon (float, optional): Not used.
axis (int, optional): axis to perform softmax (default: 0)
"""
super(Softmax, self).__init__(name)
self.epsilon = epsilon
self.axis = axis
def __call__(self, x):
"""
Returns the Softmax value.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return self.be.fprop_softmax(x, self.axis)
def bprop(self, x):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Integer value 1
"""
return 1
class PixelwiseSoftmax(Transform):
"""
Pixelwise SoftMax activation function.
Computes the function f(x_k) = exp(x_k) / sum_i(exp(x_i))
"""
def __init__(self, c, name=None, epsilon=2**-23):
super(PixelwiseSoftmax, self).__init__(name)
self.epsilon = epsilon
self.c = c
def __call__(self, x):
y = x.reshape((self.c, -1))
y[:] = (self.be.reciprocal(self.be.sum(self.be.exp(y - self.be.max(y, axis=0)), axis=0)) *
self.be.exp(y - self.be.max(y, axis=0)))
return x
def bprop(self, x):
return 1
class Tanh(Transform):
"""
Hyperbolic tangent activation function, :math:`f(x) = \\tanh(x)`.
"""
def __init__(self, name=None):
"""
Class constructor.
"""
super(Tanh, self).__init__(name)
def __call__(self, x):
"""
Returns the hyperbolic tangent.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return self.be.tanh(x)
def bprop(self, x):
"""
Returns the derivative.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Derivative, :math:`1-x^2`
"""
return (1.0 - self.be.square(x))
class Logistic(Transform):
"""
Logistic sigmoid activation function, :math:`f(x) = 1 / (1 + \exp(-x))`
Squashes the input from range :math:`[-\infty,+\infty]` to :math:`[0, 1]`
"""
def __init__(self, name=None, shortcut=False):
"""
Initialize Logistic based on whether shortcut is True or False. Shortcut
should be set to true when Logistic is used in conjunction with a CrossEntropy cost.
Doing so allows a shortcut calculation to be used during backpropagation.
Args:
shortcut (bool): If True, shortcut calculation will be used during backpropagation.
"""
super(Logistic, self).__init__(name=name)
self.set_shortcut(shortcut)
def set_shortcut(self, shortcut):
"""
Sets the backpropagation to use the shortcut when gradients do not
need to be calculated.
        If True, a shortcut calculation is used. If False, the actual derivative
        is returned during backpropagation.
Arguments:
shortcut (bool): If True, shortcut calculation will be used during backpropagation.
"""
self.shortcut = shortcut
if shortcut:
self.bprop_func = lambda x: 1
else:
self.bprop_func = lambda x: x * (1.0 - x)
def __call__(self, x):
"""
Returns the sigmoidal activation.
Arguments:
x (Tensor or optree): Input value
Returns:
Tensor or optree: Output activation
"""
return self.be.sig(x)
def bprop(self, y):
"""
Returns the derivative of the logistic (sigmoid) function at y (output)
Args:
y (Tensor or OpTree): input. y = f(x)
Returns:
OpTree: Derivative of the Logistic (sigmoid)
Returns 1 if shortcut is True.
Returns derivative (y*(1-y)) if shortcut is False.
"""
return self.bprop_func(y)
class Sign(Transform):
"""
Sign activation function.
Computes the function f(x) = Sign(x).
Uses straight-through estimator for bprop.
"""
def __init__(self, name=None):
super(Sign, self).__init__(name)
def __call__(self, x):
self.inputs = self.be.array(x.get())
return self.be.binarize(x, x, stochastic=False)
def bprop(self, x):
return self.be.less_equal(self.be.absolute(self.inputs), 1)
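# --- Illustrative sketch (not part of the original file) ---
# The transforms above delegate the element-wise math to the backend
# (self.be); purely to illustrate the formulas from the docstrings, the
# same expressions in plain numpy (not the neon backend API):
#
# import numpy as np
# x = np.array([-2.0, 0.0, 3.0])
# rectlin  = np.maximum(x, 0) + 0.01 * np.minimum(x, 0)               # leaky ReLU, slope=0.01
# explin   = np.maximum(x, 0) + 1.0 * (np.exp(np.minimum(x, 0)) - 1)  # ELU, alpha=1.0
# logistic = 1.0 / (1.0 + np.exp(-x))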
| NervanaSystems/neon | neon/transforms/activation.py | Python | apache-2.0 | 10,173 |
'''
Created on Nov 12, 2014
@author: apuser
'''
import os
import datetime
import sys
files=""
def list_dir(path, res,level):
    '''
res = {'dir':'root', 'child_dirs' : [] , 'files' : [],'level':0}
print list_dir('/root', res)
'''
global files
level+=1
for i in os.listdir(path):
temp_dir = os.path.join(path,i)
if os.path.isdir(temp_dir):
temp = {'dir':temp_dir, 'child_dirs' : [] , 'files' : [],'level':level}
#print temp_dir
res['child_dirs'].append(list_dir(temp_dir, temp,level))
else:
timestamp=os.path.getmtime(temp_dir)
date = datetime.datetime.fromtimestamp(timestamp)
#print temp_dir,os.path.getsize(temp_dir),date.strftime('%Y-%m-%d %H:%M:%S')
files+=temp_dir+" "+str(os.path.getsize(temp_dir))+"byte "+ str(date.strftime('%Y-%m-%d %H:%M:%S')) + "\n"
res['files'].append(i)
return res
def get_config_dirs(dir):
res = {'dir':'root', 'child_dirs' : [] , 'files' : [],'level':0}
return list_dir(dir,res,0)
if __name__ == '__main__':
basepath="/home/apuser/common/wiki/v2.3.1/logs/slog_20141107142021_sp7731gea_user"
#par=os.path.split(dir)[0]
#sub=os.path.split(dir)[1]
if len(sys.argv)>2:
print "more than one args exist"
sys.exit()
pass
basepath=""
if len(sys.argv)==2:
basepath=sys.argv[1]
if not os.path.isdir(basepath):
print "folder ",str(basepath)," is not exist"
sys.exit()
dirf=basepath+"_dir.txt"
res= get_config_dirs(basepath)
f = open(dirf,'w')
f.write(files)
f.close()
if files != "":
print "file store " + dirf
else:
print "no file found"
pass
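# --- Illustrative usage (not part of the original file) ---
# Given a directory tree under /tmp/logs, the script writes a flat
# listing of every file (path, size, mtime) next to it:
#
#   $ python genDir.py /tmp/logs
#   file list written to /tmp/logs_dir.txt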
| GdZ/scriptfile | sh/v2.3.3/genDir.py | Python | mit | 1,786 |
import os
import mimetypes
# use our own mime types file because not all extensions are supported on all
# linux distros
_mime_types_file = os.path.join(os.path.dirname(__file__), 'mime.types')
mimetypes.init([_mime_types_file])
def drop_extension(filepath):
index = filepath.rindex('.')
return filepath[0:index]
def replace_extension(extension, filepath):
return '{}.{}'.format(drop_extension(filepath), extension)
def mimetype(filepath):
return mimetypes.guess_type(filepath)[0]
def mimetype_for(extension):
mime = mimetype('mock_file_name.{}'.format(extension))
# if this is none then one of the fileconversions.FileFormats might result
# in none, acting as a fallback for unknown extensions
assert mime is not None
return mime
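# --- Illustrative sketch (not part of the original file) ---
# Expected behaviour of the helpers above (the exact mime type depends
# on the bundled mime.types file):
#
# drop_extension('clips/video.avi')      # -> 'clips/video'
# replace_extension('mp4', 'video.avi')  # -> 'video.mp4'
# mimetype_for('mp4')                    # -> 'video/mp4'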
| wilbertom/fileconversions | fileconversions/helpers.py | Python | mit | 776 |
# -*- coding: utf-8 -*-
import base64
from odoo import fields, models, api, _
class FatturaPAAttachmentIn(models.Model):
_name = "fatturapa.attachment.in"
_description = "E-bill import file"
_inherits = {'ir.attachment': 'ir_attachment_id'}
_inherit = ['mail.thread']
_order = 'id desc'
ir_attachment_id = fields.Many2one(
'ir.attachment', 'Attachment', required=True, ondelete="cascade")
att_name = fields.Char(related='ir_attachment_id.name', store=True)
in_invoice_ids = fields.One2many(
'account.invoice', 'fatturapa_attachment_in_id',
string="In Bills", readonly=True)
xml_supplier_id = fields.Many2one(
"res.partner", string="Supplier", compute="_compute_xml_data",
store=True)
invoices_number = fields.Integer(
"Bills Number", compute="_compute_xml_data", store=True)
invoices_total = fields.Float(
"Bills Total", compute="_compute_xml_data", store=True,
help="If specified by supplier, total amount of the document net of "
"any discount and including tax charged to the buyer/ordered"
)
registered = fields.Boolean(
"Registered", compute="_compute_registered", store=True)
e_invoice_validation_error = fields.Boolean(
compute='_compute_e_invoice_validation_error')
e_invoice_validation_message = fields.Text(
compute='_compute_e_invoice_validation_error')
_sql_constraints = [(
'ftpa_attachment_in_name_uniq',
'unique(att_name)',
'The name of the attachment must be unique!')]
@api.depends('in_invoice_ids.e_invoice_validation_error')
def _compute_e_invoice_validation_error(self):
for att in self:
bills_with_error = att.in_invoice_ids.filtered(
lambda b: b.e_invoice_validation_error
)
if not bills_with_error:
continue
att.e_invoice_validation_error = True
errors_message_template = u"{bill}:\n{errors}"
error_messages = list()
for bill in bills_with_error:
error_messages.append(
errors_message_template.format(
bill=bill.display_name,
errors=bill.e_invoice_validation_message))
att.e_invoice_validation_message = "\n\n".join(error_messages)
@api.onchange('datas_fname')
    def onchange_datas_fname(self):
self.name = self.datas_fname
def get_xml_string(self):
return self.ir_attachment_id.get_xml_string()
@api.multi
@api.depends('ir_attachment_id.datas')
def _compute_xml_data(self):
for att in self:
fatt = self.env['wizard.import.fatturapa'].get_invoice_obj(att)
cedentePrestatore = fatt.FatturaElettronicaHeader.CedentePrestatore
partner_id = self.env['wizard.import.fatturapa'].getCedPrest(
cedentePrestatore)
att.xml_supplier_id = partner_id
att.invoices_number = len(fatt.FatturaElettronicaBody)
att.invoices_total = 0
for invoice_body in fatt.FatturaElettronicaBody:
att.invoices_total += float(
invoice_body.DatiGenerali.DatiGeneraliDocumento.
ImportoTotaleDocumento or 0
)
@api.multi
@api.depends('in_invoice_ids')
def _compute_registered(self):
for att in self:
if (
att.in_invoice_ids and
len(att.in_invoice_ids) == att.invoices_number
):
att.registered = True
else:
att.registered = False
def extract_attachments(self, AttachmentsData, invoice_id):
AttachModel = self.env['fatturapa.attachments']
for attach in AttachmentsData:
if not attach.NomeAttachment:
name = _("Attachment without name")
else:
name = attach.NomeAttachment
content = attach.Attachment
_attach_dict = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': attach.DescrizioneAttachment or '',
'compression': attach.AlgoritmoCompressione or '',
'format': attach.FormatoAttachment or '',
'invoice_id': invoice_id,
}
AttachModel.create(_attach_dict)
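# --- Illustrative sketch (not part of the original file) ---
# Thanks to the _inherits delegation, creating a record only needs the
# ir.attachment fields; the delegated attachment is created implicitly.
# A hypothetical server-side snippet (file name and payload made up):
#
# att = env['fatturapa.attachment.in'].create({
#     'name': 'IT01234567890_FPA01.xml',
#     'datas': base64.b64encode(xml_bytes),
#     'datas_fname': 'IT01234567890_FPA01.xml',
# })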
| linkitspa/l10n-italy | l10n_it_fatturapa_in/models/attachment.py | Python | agpl-3.0 | 4,485 |
# -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
import os
from tests.command.daemon import load
from tests.testcase import ParentTestCase
from dbmanagr.command import daemon
from dbmanagr.utils import mute_stderr
def test_daemon():
os.environ['UNITTEST'] = 'True'
for test in load():
yield test,
del os.environ['UNITTEST']
class DaemonTestCase(ParentTestCase):
def test_writer(self):
"""Tests the writer"""
import sys
sys.argv = ['']
self.assertRaises(
SystemExit,
mute_stderr(daemon.main)
)
self.assertRaises(
SystemExit,
mute_stderr(daemon.main),
[]
)
self.assertIsNone(daemon.main(['stop']))
def test_main(self):
"""Tests the main function"""
self.assertIsNone(daemon.main(['status']))
self.assertIsNone(daemon.main(['stop']))
| resamsel/dbmanagr | src/tests/command/daemon/test_daemon.py | Python | gpl-3.0 | 1,631 |
import numpy as np
import cartopy.crs as ccrs
from geoviews.element import Image
from geoviews.element.comparison import ComparisonTestCase
from geoviews.operation import project
class TestProjection(ComparisonTestCase):
def test_image_latlon360_wrapping(self):
xs = np.linspace(72, 360, 5)
ys = np.linspace(-60, 60, 3)
img = Image((xs, ys, xs[np.newaxis, :]*ys[:, np.newaxis]))
proj = project(img, projection=ccrs.PlateCarree())
zs = proj.dimension_values('z', flat=False)
self.assertEqual(zs, np.array([
[-12960., -17280., -21600., -4320., -8640.],
[ 0., 0., 0., 0., 0.],
[ 12960., 17280., 21600., 4320., 8640.]
]))
def test_image_project_latlon_to_mercator(self):
xs = np.linspace(72, 360, 5)
ys = np.linspace(-60, 60, 3)
img = Image((xs, ys, xs[np.newaxis, :]*ys[:, np.newaxis]))
proj = project(img)
zs = proj.dimension_values('z', flat=False)
self.assertEqual(zs, np.array([
[-12960., -17280., -21600., -4320., -8640.],
[ 0., 0., 0., 0., 0.],
[ 12960., 17280., 21600., 4320., 8640.]
]))
| ioam/geoviews | geoviews/tests/testprojection.py | Python | bsd-3-clause | 1,253 |
default_app_config = 'nodeshot.ui.default.apps.AppConfig'
| SCORE42/nodeshot | nodeshot/ui/default/__init__.py | Python | gpl-3.0 | 58 |
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implementation of different ranking methods based on
the citation graph:
- citation count/ time decayed citation count
- pagerank / pagerank with external citations
- time decayed pagerank
"""
# pylint: disable=E0611
import ConfigParser
from math import exp
import datetime
import time
import re
import sys
from numpy import array, ones, zeros, int32, float32, sqrt, dot
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.dbquery import run_sql, serialize_via_marshal, \
deserialize_via_marshal
from invenio.bibtask import write_message
from invenio.config import CFG_ETCDIR
def get_citations_from_file(filename):
"""gets the citation data (who cites who) from a file and returns
- a dictionary of type x:{x1,x2..},
where x is cited by x1,x2..
- a dictionary of type a:{b},
    where recid 'a' is associated with an index 'b' """
cit = {}
dict_of_ids = {}
count = 0
try:
citation_file = open(filename, "r")
except StandardError:
write_message("Cannot find file: %s" % filename, sys.stderr)
raise StandardError
for line in citation_file:
tokens = line.strip().split()
recid_cites = int(tokens[0])
recid_cited = int(tokens[1])
if recid_cited not in cit:
cit[recid_cited] = []
#without this, duplicates might be introduced
if recid_cites not in cit[recid_cited] and recid_cites != recid_cited:
cit[recid_cited].append(recid_cites)
if recid_cites not in dict_of_ids:
dict_of_ids[recid_cites] = count
count += 1
if recid_cited not in dict_of_ids:
dict_of_ids[recid_cited] = count
count += 1
citation_file.close()
write_message("Citation data collected from file: %s" %filename, verbose=2)
write_message("Ids and recids corespondace: %s" \
%str(dict_of_ids), verbose=9)
write_message("Citations: %s" % str(cit), verbose=9)
return cit, dict_of_ids
def get_citations_from_db():
"""gets the citation data (who cites who) from the rnkCITATIONDATA table,
and returns:
-a dictionary of type x:{x1,x2..}, where x is cited by x1,x2..
    -a dict of type a:{b} where recid 'a' is associated with an index 'b'"""
dict_of_ids = {}
count = 0
query = "select object_value from rnkCITATIONDATA \
where object_name = 'citationdict'"
cit_compressed = run_sql(query)
cit = []
if cit_compressed and cit_compressed[0] and cit_compressed[0][0]:
cit = deserialize_via_marshal(cit_compressed[0][0])
if cit:
for item in cit:
#check for duplicates in citation dictionary
cit[item] = set(cit[item])
if item in cit[item]:
cit[item].remove(item)
if item not in dict_of_ids:
dict_of_ids[item] = count
count += 1
for value in cit[item]:
if value not in dict_of_ids:
dict_of_ids[value] = count
count += 1
write_message("Citation data collected\
from rnkCITATIONDATA", verbose=2)
write_message("Ids and recids corespondace: %s" \
% str(dict_of_ids), verbose=9)
write_message("Citations: %s" % str(cit), verbose=9)
return cit, dict_of_ids
else:
write_message("Error while extracting citation data \
from rnkCITATIONDATA table", verbose=1)
else:
write_message("Error while extracting citation data \
from rnkCITATIONDATA table", verbose=1)
return {}, {}
def construct_ref_array(cit, dict_of_ids, len_):
"""returns an array with the number of references that each recid has """
ref = array((), int32)
ref = zeros(len_, int32)
for key in cit:
for value in cit[key]:
ref[dict_of_ids[value]] += 1
write_message("Number of references: %s" %str(ref), verbose=9)
write_message("Finished computing total number \
of references for each paper.", verbose=5)
return ref
def get_external_links_from_file(filename, ref, dict_of_ids):
"""returns a dictionary containing the number of
external links for each recid
external link=citation that is not in our database """
ext_links = {}
#format: ext_links[dict_of_ids[recid]]=number of total external links
try:
external_file = open(filename, "r")
except StandardError:
write_message("Cannot find file: %s" % filename, sys.stderr)
raise StandardError
for line in external_file:
tokens = line.strip().split()
recid = int(tokens[0])
nr_of_external = int(tokens[1])
ext_links[dict_of_ids[recid]] = nr_of_external - ref[dict_of_ids[recid]]
if ext_links[dict_of_ids[recid]] < 0:
ext_links[dict_of_ids[recid]] = 0
external_file.close()
write_message("External link information extracted", verbose=2)
return ext_links
def get_external_links_from_db_old(ref, dict_of_ids, reference_indicator):
"""returns a dictionary containing the number of
external links for each recid
external link=citation that is not in our database """
ext_links = {}
reference_tag_regex = reference_indicator + "[a-z]"
for recid in dict_of_ids:
query = "select COUNT(DISTINCT field_number) from bibrec_bib99x \
where id_bibrec='%s' and id_bibxxx in \
(select id from bib99x where tag RLIKE '%s');" \
% (str(recid), reference_tag_regex)
result_set = run_sql(query)
if result_set:
total_links = int(result_set[0][0])
internal_links = ref[dict_of_ids[recid]]
ext_links[dict_of_ids[recid]] = total_links - internal_links
if ext_links[dict_of_ids[recid]] < 0:
ext_links[dict_of_ids[recid]] = 0
else:
ext_links[dict_of_ids[recid]] = 0
write_message("External link information extracted", verbose=2)
write_message("External links: %s" % str(ext_links), verbose=9)
return ext_links
def get_external_links_from_db(ref, dict_of_ids, reference_indicator):
"""returns a dictionary containing the number of
external links for each recid
external link=citation that is not in our database """
ext_links = {}
dict_all_ref = {}
for recid in dict_of_ids:
dict_all_ref[recid] = 0
ext_links[dict_of_ids[recid]] = 0
reference_db_id = reference_indicator[0:2]
reference_tag_regex = reference_indicator + "[a-z]"
tag_list = run_sql("select id from bib" + reference_db_id + \
"x where tag RLIKE %s", (reference_tag_regex, ))
tag_set = set()
for tag in tag_list:
tag_set.add(tag[0])
ref_list = run_sql("select id_bibrec, id_bibxxx, field_number from \
bibrec_bib" + reference_db_id + "x group by \
id_bibrec, field_number")
for item in ref_list:
recid = int(item[0])
id_bib = int(item[1])
if recid in dict_of_ids and id_bib in tag_set:
dict_all_ref[recid] += 1
for recid in dict_of_ids:
total_links = dict_all_ref[recid]
internal_links = ref[dict_of_ids[recid]]
ext_links[dict_of_ids[recid]] = total_links - internal_links
if ext_links[dict_of_ids[recid]] < 0:
ext_links[dict_of_ids[recid]] = 0
write_message("External link information extracted", verbose=2)
write_message("External links: %s" % str(ext_links), verbose=9)
return ext_links
def avg_ext_links_with_0(ext_links):
"""returns the average number of external links per paper
including in the counting the papers with 0 external links"""
total = 0.0
for item in ext_links:
total += ext_links[item]
avg_ext = total/len(ext_links)
write_message("The average number of external links per paper (including \
papers with 0 external links) is: %s" % str(avg_ext), verbose=3)
return avg_ext
def avg_ext_links_without_0(ext_links):
"""returns the average number of external links per paper
excluding in the counting the papers with 0 external links"""
count = 0.0
total = 0.0
for item in ext_links:
if ext_links[item] != 0:
count += 1
total += ext_links[item]
avg_ext = total/count
write_message("The average number of external links per paper (excluding \
papers with 0 external links) is: %s" % str(avg_ext), verbose=3)
return avg_ext
def leaves(ref):
"""returns the number of papers that do not cite any other paper"""
nr_of_leaves = 0
for i in ref:
if i == 0:
nr_of_leaves += 1
write_message("The number of papers that do not cite \
any other papers: %s" % str(leaves), verbose=3)
return nr_of_leaves
def get_dates_from_file(filename, dict_of_ids):
"""Returns the year of the publication for each paper.
In case the year is not in the db, the year of the submission is taken"""
dates = {}
# the format is: dates[dict_of_ids[recid]] = year
try:
dates_file = open(filename, "r")
except StandardError:
write_message("Cannot find file: %s" % filename, sys.stderr)
raise StandardError
for line in dates_file:
tokens = line.strip().split()
recid = int(tokens[0])
year = int(tokens[1])
dates[dict_of_ids[recid]] = year
dates_file.close()
write_message("Dates extracted", verbose=2)
write_message("Dates dictionary %s" % str(dates), verbose=9)
return dates
def get_dates_from_db(dict_of_ids, publication_year_tag, creation_date_tag):
"""Returns the year of the publication for each paper.
In case the year is not in the db, the year of the submission is taken"""
current_year = int(datetime.datetime.now().strftime("%Y"))
publication_year_db_id = publication_year_tag[0:2]
creation_date_db_id = creation_date_tag[0:2]
total = 0
count = 0
dict_of_dates = {}
for recid in dict_of_ids:
dict_of_dates[recid] = 0
date_list = run_sql("select id, tag, value from bib" + \
publication_year_db_id + "x where tag=%s", \
(publication_year_tag, ))
date_dict = {}
for item in date_list:
date_dict[int(item[0])] = item[2]
pattern = re.compile('.*(\d{4}).*')
date_list = run_sql("select id_bibrec, id_bibxxx, field_number \
from bibrec_bib" + publication_year_db_id +"x")
for item in date_list:
recid = int(item[0])
id_ = int(item[1])
if id_ in date_dict and recid in dict_of_dates:
reg = pattern.match(date_dict[id_])
if reg:
date = int(reg.group(1))
if date > 1000 and date <= current_year:
dict_of_dates[recid] = date
total += date
count += 1
not_covered = []
for recid in dict_of_dates:
if dict_of_dates[recid] == 0:
not_covered.append(recid)
date_list = run_sql("select id, tag, value from bib" + \
creation_date_db_id + "x where tag=%s", \
(creation_date_tag, ))
date_dict = {}
for item in date_list:
date_dict[int(item[0])] = item[2]
date_list = run_sql("select id_bibrec, id_bibxxx, field_number \
from bibrec_bib" + creation_date_db_id + "x")
for item in date_list:
recid = int(item[0])
id_ = int(item[1])
if id_ in date_dict and recid in not_covered:
date = int(str(date_dict[id_])[0:4])
if date > 1000 and date <= current_year:
dict_of_dates[recid] = date
total += date
count += 1
dates = {}
med = total/count
for recid in dict_of_dates:
if dict_of_dates[recid] == 0:
dates[dict_of_ids[recid]] = med
else:
dates[dict_of_ids[recid]] = dict_of_dates[recid]
write_message("Dates extracted", verbose=2)
write_message("Dates dictionary %s" % str(dates), verbose=9)
return dates
def construct_sparse_matrix(cit, ref, dict_of_ids, len_, damping_factor):
"""returns several structures needed in the calculation
of the PAGERANK method using this structures, we don't need
to keep the full matrix in the memory"""
sparse = {}
for item in cit:
for value in cit[item]:
sparse[(dict_of_ids[item], dict_of_ids[value])] = \
damping_factor * 1.0/ref[dict_of_ids[value]]
semi_sparse = []
for j in range(len_):
if ref[j] == 0:
semi_sparse.append(j)
semi_sparse_coeficient = damping_factor/len_
#zero_coeficient = (1-damping_factor)/len_
write_message("Sparse information calculated", verbose=3)
return sparse, semi_sparse, semi_sparse_coeficient
def construct_sparse_matrix_ext(cit, ref, ext_links, dict_of_ids, alpha, beta):
"""if x doesn't cite anyone: cites everyone : 1/len_ -- should be used!
returns several structures needed in the calculation
of the PAGERANK_EXT method"""
len_ = len(dict_of_ids)
sparse = {}
semi_sparse = {}
sparse[0, 0] = 1.0 - alpha
for j in range(len_):
sparse[j+1, 0] = alpha/(len_)
if j not in ext_links:
sparse[0, j+1] = beta/(len_ + beta)
else:
if ext_links[j] == 0:
sparse[0, j+1] = beta/(len_ + beta)
else:
aux = beta * ext_links[j]
if ref[j] == 0:
sparse[0, j+1] = aux/(aux + len_)
else:
sparse[0, j+1] = aux/(aux + ref[j])
if ref[j] == 0:
semi_sparse[j+1] = (1.0 - sparse[0, j + 1])/len_
for item in cit:
for value in cit[item]:
sparse[(dict_of_ids[item] + 1, dict_of_ids[value] + 1)] = \
(1.0 - sparse[0, dict_of_ids[value] + 1])/ref[dict_of_ids[value]]
#for i in range(len_ + 1):
# a = ""
# for j in range (len_ + 1):
# if (i,j) in sparse:
# a += str(sparse[(i,j)]) + "\t"
# else:
# a += "0\t"
# print a
#print semi_sparse
write_message("Sparse information calculated", verbose=3)
return sparse, semi_sparse
def construct_sparse_matrix_time(cit, ref, dict_of_ids, \
damping_factor, date_coef):
"""returns several structures needed in the calculation of the PAGERANK_time
method using this structures,
we don't need to keep the full matrix in the memory"""
len_ = len(dict_of_ids)
sparse = {}
for item in cit:
for value in cit[item]:
sparse[(dict_of_ids[item], dict_of_ids[value])] = damping_factor * \
date_coef[dict_of_ids[value]]/ref[dict_of_ids[value]]
semi_sparse = []
for j in range(len_):
if ref[j] == 0:
semi_sparse.append(j)
semi_sparse_coeficient = damping_factor/len_
#zero_coeficient = (1-damping_factor)/len_
write_message("Sparse information calculated", verbose=3)
return sparse, semi_sparse, semi_sparse_coeficient
def statistics_on_sparse(sparse):
"""returns the number of papers that cite themselves"""
count_diag = 0
for (i, j) in sparse.keys():
if i == j:
count_diag += 1
write_message("The number of papers that cite themselves: %s" % \
str(count_diag), verbose=3)
return count_diag
def pagerank(conv_threshold, check_point, len_, sparse, \
semi_sparse, semi_sparse_coef):
"""the core function of the PAGERANK method
    returns an array with the ranks corresponding to each recid"""
weights_old = ones((len_), float32) # initial weights
weights_new = array((), float32)
converged = False
nr_of_check_points = 0
difference = len_
while not converged:
nr_of_check_points += 1
for step in (range(check_point)):
weights_new = zeros((len_), float32)
for (i, j) in sparse.keys():
weights_new[i] += sparse[(i, j)]*weights_old[j]
semi_total = 0.0
for j in semi_sparse:
semi_total += weights_old[j]
weights_new = weights_new + semi_sparse_coef * semi_total + \
(1.0/len_ - semi_sparse_coef) * sum(weights_old)
if step == check_point - 1:
diff = weights_new - weights_old
difference = sqrt(dot(diff, diff))/len_
write_message("Finished step: %s, %s " \
%(str(check_point*(nr_of_check_points-1) + step), \
str(difference)), verbose=5)
weights_old = weights_new.copy()
converged = (difference < conv_threshold)
write_message("PageRank calculated for all recids finnished in %s steps. \
The threshold was %s" % (str(nr_of_check_points), str(difference)),\
verbose=2)
return weights_old
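# --- Illustrative sketch (not part of the original file) ---
# Toy run of the plain PageRank pipeline above, with made-up data: three
# papers where papers 1 and 2 both cite paper 3.
#
# cit = {3: [1, 2]}                 # paper 3 is cited by papers 1 and 2
# dict_of_ids = {1: 0, 2: 1, 3: 2}
# ref = construct_ref_array(cit, dict_of_ids, 3)
# sparse, semi, coef = construct_sparse_matrix(cit, ref, dict_of_ids, 3, 0.85)
# weights = pagerank(1e-08, 100, 3, sparse, semi, coef)  # paper 3 ranks highest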
def pagerank_ext(conv_threshold, check_point, len_, sparse, semi_sparse):
"""the core function of the PAGERANK_EXT method
    returns an array with the ranks corresponding to each recid"""
weights_old = array((), float32)
weights_old = ones((len_), float32)
weights_new = array((), float32)
converged = False
nr_of_check_points = 0
difference = len_
while not converged:
nr_of_check_points += 1
for step in (range(check_point)):
weights_new = zeros((len_), float32)
for (i, j) in sparse.keys():
weights_new[i] += sparse[(i, j)]*weights_old[j]
total_sum = 0.0
for j in semi_sparse:
total_sum += semi_sparse[j]*weights_old[j]
weights_new[1:len_] = weights_new[1:len_] + total_sum
if step == check_point - 1:
diff = weights_new - weights_old
difference = sqrt(dot(diff, diff))/len_
write_message("Finished step: %s, %s " \
% (str(check_point*(nr_of_check_points-1) + step), \
str(difference)), verbose=5)
weights_old = weights_new.copy()
converged = (difference < conv_threshold)
write_message("PageRank calculated for all recids finnished in %s steps. \
The threshold was %s" % (str(nr_of_check_points), \
str(difference)), verbose=2)
#return weights_old[1:len_]/(len_ - weights_old[0])
return weights_old[1:len_]
def pagerank_time(conv_threshold, check_point, len_, \
sparse, semi_sparse, semi_sparse_coeficient, date_coef):
"""the core function of the PAGERANK_TIME method: pageRank + time decay
    returns an array with the ranks corresponding to each recid"""
weights_old = array((), float32)
weights_old = ones((len_), float32) # initial weights
weights_new = array((), float32)
converged = False
nr_of_check_points = 0
difference = len_
while not converged:
nr_of_check_points += 1
for step in (range(check_point)):
weights_new = zeros((len_), float32)
for (i, j) in sparse.keys():
weights_new[i] += sparse[(i, j)]*weights_old[j]
semi_total = 0.0
for j in semi_sparse:
semi_total += weights_old[j]*date_coef[j]
zero_total = 0.0
for i in range(len_):
zero_total += weights_old[i]*date_coef[i]
#dates = array(date_coef.keys())
#zero_total = dot(weights_old, dates)
weights_new = weights_new + semi_sparse_coeficient * semi_total + \
(1.0/len_ - semi_sparse_coeficient) * zero_total
if step == check_point - 1:
diff = weights_new - weights_old
difference = sqrt(dot(diff, diff))/len_
write_message("Finished step: %s, %s " \
% (str(check_point*(nr_of_check_points-1) + step), \
str(difference)), verbose=5)
weights_old = weights_new.copy()
converged = (difference < conv_threshold)
write_message("PageRank calculated for all recids finnished in %s steps.\
The threshold was %s" % (str(nr_of_check_points), \
str(difference)), verbose=2)
return weights_old
def citation_rank_time(cit, dict_of_ids, date_coef, dates, decimals):
"""returns a dictionary recid:weight based on the total number of
citations as function of time"""
dict_of_ranks = {}
for key in dict_of_ids:
if key in cit:
dict_of_ranks[key] = 0
for recid in cit[key]:
dict_of_ranks[key] += date_coef[dict_of_ids[recid]]
dict_of_ranks[key] = round(dict_of_ranks[key], decimals) \
+ dates[dict_of_ids[key]]* pow(10, 0-4-decimals)
else:
dict_of_ranks[key] = dates[dict_of_ids[key]]* pow(10, 0-4-decimals)
write_message("Citation rank calculated", verbose=2)
return dict_of_ranks
def get_ranks(weights, dict_of_ids, mult, dates, decimals):
"""returns a dictionary recid:value, where value is the weight of the
recid paper; the second order is the reverse time order,
from recent to past"""
dict_of_ranks = {}
for item in dict_of_ids:
dict_of_ranks[item] = round(weights[dict_of_ids[item]]* mult, decimals)\
+ dates[dict_of_ids[item]]* pow(10, 0-4-decimals)
#dict_of_ranks[item] = weights[dict_of_ids[item]]
return dict_of_ranks
def sort_weights(dict_of_ranks):
"""sorts the recids based on weights(first order)
and on dates(second order)"""
ranks_by_citations = sorted(dict_of_ranks.keys(), lambda x, y: \
cmp(dict_of_ranks[y], dict_of_ranks[x]))
return ranks_by_citations
def normalize_weights(dict_of_ranks):
"""the weights should be normalized to 100, so they woun't be
different from the weights from other ranking methods"""
max_weight = 0.0
for recid in dict_of_ranks:
weight = dict_of_ranks[recid]
if weight > max_weight:
max_weight = weight
for recid in dict_of_ranks:
dict_of_ranks[recid] = round(dict_of_ranks[recid] * 100.0/max_weight, 3)
def write_first_ranks_to_file(ranks_by_citations, dict_of_ranks, \
nr_of_ranks, filename):
"""Writes the first n results of the ranking method into a file"""
try:
ranks_file = open(filename, "w")
except StandardError:
write_message("Problems with file: %s" % filename, sys.stderr)
raise StandardError
for i in range(nr_of_ranks):
ranks_file.write(str(i+1) + "\t" + str(ranks_by_citations[i]) + \
"\t" + str(dict_of_ranks[ranks_by_citations[i]]) + "\n")
ranks_file.close()
write_message("The first %s pairs recid:rank in the ranking order \
are written into this file: %s" % (nr_of_ranks, filename), verbose=2)
def del_rank_method_data(rank_method_code):
"""Delete the data for a rank method from rnkMETHODDATA table"""
id_ = run_sql("SELECT id from rnkMETHOD where name=%s", (rank_method_code, ))
run_sql("DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s", (id_[0][0], ))
def into_db(dict_of_ranks, rank_method_code):
"""Writes into the rnkMETHODDATA table the ranking results"""
method_id = run_sql("SELECT id from rnkMETHOD where name=%s", \
(rank_method_code, ))
del_rank_method_data(rank_method_code)
serialized_data = serialize_via_marshal(dict_of_ranks)
method_id_str = str(method_id[0][0])
run_sql("INSERT INTO rnkMETHODDATA(id_rnkMETHOD, relevance_data) \
VALUES(%s, %s) ", (method_id_str, serialized_data, ))
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
run_sql("UPDATE rnkMETHOD SET last_updated=%s WHERE name=%s", \
(date, rank_method_code))
write_message("Finished writing the ranks into rnkMETHOD table", verbose=5)
def run_pagerank(cit, dict_of_ids, len_, ref, damping_factor, \
conv_threshold, check_point, dates):
"""returns the final form of the ranks when using pagerank method"""
write_message("Running the PageRank method", verbose=5)
sparse, semi_sparse, semi_sparse_coeficient = \
construct_sparse_matrix(cit, ref, dict_of_ids, len_, damping_factor)
weights = pagerank(conv_threshold, check_point, len_, \
sparse, semi_sparse, semi_sparse_coeficient)
dict_of_ranks = get_ranks(weights, dict_of_ids, 1, dates, 2)
return dict_of_ranks
def run_pagerank_ext(cit, dict_of_ids, ref, ext_links, \
conv_threshold, check_point, alpha, beta, dates):
"""returns the final form of the ranks when using pagerank_ext method"""
write_message("Running the PageRank with external links method", verbose=5)
len_ = len(dict_of_ids)
sparse, semi_sparse = construct_sparse_matrix_ext(cit, ref, \
ext_links, dict_of_ids, alpha, beta)
weights = pagerank_ext(conv_threshold, check_point, \
len_ + 1, sparse, semi_sparse)
dict_of_ranks = get_ranks(weights, dict_of_ids, 1, dates, 2)
return dict_of_ranks
def run_pagerank_time(cit, dict_of_ids, len_, ref, damping_factor, \
conv_threshold, check_point, date_coef, dates):
"""returns the final form of the ranks when using
pagerank + time decay method"""
write_message("Running the PageRank_time method", verbose=5)
sparse, semi_sparse, semi_sparse_coeficient = \
construct_sparse_matrix_time(cit, ref, dict_of_ids, \
damping_factor, date_coef)
weights = pagerank_time(conv_threshold, check_point, len_, \
sparse, semi_sparse, semi_sparse_coeficient, date_coef)
dict_of_ranks = get_ranks(weights, dict_of_ids, 100000, dates, 2)
return dict_of_ranks
def run_citation_rank_time(cit, dict_of_ids, date_coef, dates):
"""returns the final form of the ranks when using citation count
as function of time method"""
write_message("Running the citation rank with time decay method", verbose=5)
dict_of_ranks = citation_rank_time(cit, dict_of_ids, date_coef, dates, 2)
return dict_of_ranks
def spearman_rank_correlation_coef(rank1, rank2, len_):
"""rank1 and rank2 are arrays containing the recids in the ranking order
    returns the correlation coefficient (-1 <= c <= 1) between 2 rankings;
    the closer c is to 1, the more correlated the two ranking methods are"""
total = 0
for i in range(len_):
rank_value = rank2.index(rank1[i])
total += (i - rank_value)*(i - rank_value)
return 1 - (6.0 * total) / (len_*(len_*len_ - 1))
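# --- Illustrative sketch (not part of the original file) ---
# Worked example: for rank1 = [a, b, c] and rank2 = [b, a, c] only the
# first two places differ, so the squared rank differences sum to 2 and
# spearman_rank_correlation_coef(rank1, rank2, 3) = 1 - 6*2/(3*8) = 0.5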
def remove_loops(cit, dates, dict_of_ids):
"""when using time decay, new papers that are part of a loop
are accumulating a lot of fake weight"""
new_cit = {}
for recid in cit:
new_cit[recid] = []
for cited_by in cit[recid]:
if dates[dict_of_ids[cited_by]] >= dates[dict_of_ids[recid]]:
if cited_by in cit:
if recid not in cit[cited_by]:
new_cit[recid].append(cited_by)
else:
write_message("Loop removed: %s <-> %s" \
%(cited_by, recid), verbose=9)
else:
new_cit[recid].append(cited_by)
else:
write_message("Loop removed: %s <-> %s" \
%(cited_by, recid), verbose=9)
write_message("Simple loops removed", verbose=5)
return new_cit
def calculate_time_weights(len_, time_decay, dates):
"""calculates the time coeficients for each paper"""
current_year = int(datetime.datetime.now().strftime("%Y"))
date_coef = {}
for j in range(len_):
date_coef[j] = exp(time_decay*(dates[j] - current_year))
write_message("Time weights calculated", verbose=5)
write_message("Time weights: %s" % str(date_coef), verbose=9)
return date_coef
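# --- Illustrative sketch (not part of the original file) ---
# Worked example: with time_decay = 0.1, a paper published 5 years ago
# gets the weight exp(0.1 * (-5)) = exp(-0.5) ~= 0.61, so older papers
# contribute less to the time-decayed ranks.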
def get_dates(function, config, dict_of_ids):
"""returns a dictionary containing the year of
publishing for each paper"""
try:
file_for_dates = config.get(function, "file_with_dates")
dates = get_dates_from_file(file_for_dates, dict_of_ids)
except (ConfigParser.NoOptionError, StandardError), err:
write_message("If you want to read the dates from file set up the \
'file_for_dates' variable in the config file [%s]" %err, verbose=3)
try:
publication_year_tag = config.get(function, "publication_year_tag")
dummy = int(publication_year_tag[0:3])
except (ConfigParser.NoOptionError, StandardError):
write_message("You need to set up correctly the publication_year_tag \
in the cfg file", sys.stderr)
raise Exception
try:
creation_date_tag = config.get(function, "creation_date_tag")
dummy = int(creation_date_tag[0:3])
except (ConfigParser.NoOptionError, StandardError):
write_message("You need to set up correctly the creation_date_tag \
in the cfg file", sys.stderr)
raise Exception
dates = get_dates_from_db(dict_of_ids, publication_year_tag, \
creation_date_tag)
return dates
def citerank(rank_method_code):
"""new ranking method based on the citation graph"""
write_message("Running rank method: %s" % rank_method_code, verbose=0)
try:
file_ = CFG_ETCDIR + "/bibrank/" + rank_method_code + ".cfg"
config = ConfigParser.ConfigParser()
config.readfp(open(file_))
except StandardError:
write_message("Cannot find configuration file: %s" % file_, sys.stderr)
raise StandardError
    # the file for citations needs to have the following format:
    # each line needs to be x[tab]y, where x cites y; x, y are recids
function = config.get("rank_method", "function")
try:
file_for_citations = config.get(function, "file_with_citations")
cit, dict_of_ids = get_citations_from_file(file_for_citations)
except (ConfigParser.NoOptionError, StandardError), err:
write_message("If you want to read the citation data from file set up \
the file_for_citations parameter in the config file [%s]" %err, verbose=2)
cit, dict_of_ids = get_citations_from_db()
len_ = len(dict_of_ids.keys())
write_message("Number of nodes(papers) to rank : %s" % str(len_), verbose=3)
if len_ == 0:
write_message("Error: No citations to read!", sys.stderr)
raise Exception
try:
method = config.get(function, "citerank_method")
except ConfigParser.NoOptionError, err:
write_message("Exception: %s " %err, sys.stderr)
raise Exception
write_message("Running %s method." % method, verbose=2)
dates = get_dates(function, config, dict_of_ids)
if method == "citation_time":
try:
time_decay = float(config.get(function, "time_decay"))
except (ConfigParser.NoOptionError, ValueError), err:
write_message("Exception: %s" % err, sys.stderr)
raise Exception
date_coef = calculate_time_weights(len_, time_decay, dates)
#cit = remove_loops(cit, dates, dict_of_ids)
dict_of_ranks = \
run_citation_rank_time(cit, dict_of_ids, date_coef, dates)
else:
try:
conv_threshold = float(config.get(function, "conv_threshold"))
check_point = int(config.get(function, "check_point"))
damping_factor = float(config.get(function, "damping_factor"))
write_message("Parameters: d = %s, conv_threshold = %s, \
check_point = %s" %(str(damping_factor), \
str(conv_threshold), str(check_point)), verbose=5)
except (ConfigParser.NoOptionError, StandardError), err:
write_message("Exception: %s" % err, sys.stderr)
raise Exception
if method == "pagerank_classic":
ref = construct_ref_array(cit, dict_of_ids, len_)
use_ext_cit = ""
try:
use_ext_cit = config.get(function, "use_external_citations")
write_message("Pagerank will use external citations: %s" \
%str(use_ext_cit), verbose=5)
except (ConfigParser.NoOptionError, StandardError), err:
write_message("%s" % err, verbose=2)
if use_ext_cit == "yes":
try:
ext_citation_file = config.get(function, "ext_citation_file")
ext_links = get_external_links_from_file(ext_citation_file,
ref, dict_of_ids)
except (ConfigParser.NoOptionError, StandardError):
write_message("If you want to read the external citation \
data from file set up the ext_citation_file parameter in the config. file", \
verbose=3)
try:
reference_tag = config.get(function, "ext_reference_tag")
dummy = int(reference_tag[0:3])
except (ConfigParser.NoOptionError, StandardError):
write_message("You need to set up correctly the \
reference_tag in the cfg file", sys.stderr)
raise Exception
ext_links = get_external_links_from_db(ref, \
dict_of_ids, reference_tag)
avg = avg_ext_links_with_0(ext_links)
if avg < 1:
write_message("This method can't be ran. There is not \
enough information about the external citation. Hint: check the reference tag", \
sys.stderr)
raise Exception
avg_ext_links_without_0(ext_links)
try:
alpha = float(config.get(function, "ext_alpha"))
beta = float(config.get(function, "ext_beta"))
except (ConfigParser.NoOptionError, StandardError), err:
write_message("Exception: %s" % err, sys.stderr)
raise Exception
dict_of_ranks = run_pagerank_ext(cit, dict_of_ids, ref, \
ext_links, conv_threshold, check_point, alpha, beta, dates)
else:
dict_of_ranks = run_pagerank(cit, dict_of_ids, len_, ref, \
damping_factor, conv_threshold, check_point, dates)
elif method == "pagerank_time":
try:
time_decay = float(config.get(function, "time_decay"))
write_message("Parameter: time_decay = %s" \
%str(time_decay), verbose=5)
except (ConfigParser.NoOptionError, StandardError), err:
write_message("Exception: %s" % err, sys.stderr)
raise Exception
date_coef = calculate_time_weights(len_, time_decay, dates)
cit = remove_loops(cit, dates, dict_of_ids)
ref = construct_ref_array(cit, dict_of_ids, len_)
dict_of_ranks = run_pagerank_time(cit, dict_of_ids, len_, ref, \
damping_factor, conv_threshold, check_point, date_coef, dates)
else:
write_message("Error: Unknown ranking method. \
Please check the ranking_method parameter in the config. file.", sys.stderr)
raise Exception
try:
filename_ranks = config.get(function, "output_ranks_to_filename")
max_ranks = config.get(function, "output_rank_limit")
if not max_ranks.isdigit():
max_ranks = len_
else:
max_ranks = int(max_ranks)
if max_ranks > len_:
max_ranks = len_
ranks = sort_weights(dict_of_ranks)
write_message("Ranks: %s" % str(ranks), verbose=9)
write_first_ranks_to_file(ranks, dict_of_ranks, \
max_ranks, filename_ranks)
except (ConfigParser.NoOptionError, StandardError):
write_message("If you want the ranks to be printed in a file you have \
to set output_ranks_to_filename and output_rank_limit \
parameters in the configuration file", verbose=3)
normalize_weights(dict_of_ranks)
into_db(dict_of_ranks, rank_method_code)
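# A sketch of the kind of .cfg file citerank() expects (section name and
# values below are illustrative; the option names are the ones read above):
#
# [rank_method]
# function = citerank_methods
#
# [citerank_methods]
# citerank_method = pagerank_classic
# damping_factor = 0.85
# conv_threshold = 0.0001
# check_point = 10
# publication_year_tag = 260__c
# creation_date_tag = 961__x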
|
lbjay/cds-invenio
|
modules/bibrank/lib/bibrank_citerank_indexer.py
|
Python
|
gpl-2.0
| 37,250
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
LecoS
A QGIS plugin
Contains analytical functions for landscape analysis
-------------------
begin : 2012-09-06
copyright : (C) 2013 by Martin Jung
email : martinjung at zoho.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import PyQT bindings
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Sextante bindings
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import Setting, ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
# Import Processing bindings
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.Processing import Processing
try:
from processing.core.ProcessingUtils import ProcessingUtils
except ImportError: # for qgis dev
# new processing update
from processing.tools.system import *
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
try:
from processing.core.QGisLayers import QGisLayers
except: # for qgis dev
# new processing update
from processing.tools import dataobjects, vector
# For Processing update
try:
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputTable import OutputTable
except ImportError:
from processing.core.outputs import OutputVector
from processing.core.outputs import OutputRaster
from processing.core.outputs import OutputTable
try:
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterTableField import ParameterTableField
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterRange import ParameterRange
from processing.parameters.ParameterFixedTable import ParameterFixedTable
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterFile import ParameterFile
from processing.parameters.ParameterCrs import ParameterCrs
except ImportError:
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterRange
from processing.core.parameters import ParameterFixedTable
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterCrs
# Import numpy and scipy
import numpy
try:
import scipy
except ImportError:
    import sys  # os/sys are imported further down; sys is needed here already
    QMessageBox.critical(QDialog(), "LecoS: Warning", "Please install scipy (http://scipy.org/) in your QGIS python path.")
    sys.exit(0)
from scipy import ndimage  # import ndimage module separately for easy access
# Import GDAL and ogr
try:
from osgeo import gdal
except ImportError:
import gdal
try:
from osgeo import gdalconst
except ImportError:
import gdalconst
try:
from osgeo import ogr
except ImportError:
import ogr
try:
import string
except ImportError:
pass
# Register gdal and ogr drivers
#if hasattr(gdal,"AllRegister"): # Can register drivers
# gdal.AllRegister() # register all gdal drivers
#if hasattr(ogr,"RegisterAll"):
# ogr.RegisterAll() # register all ogr drivers
import os, sys
# Import functions and metrics
import lecos_functions as func
try:
import nlmpy
except ImportError:
nlmpy = False
## Algorithms ##
class SpatialRandom(GeoAlgorithm):
# Define constants
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Spatial random"
self.cmdName = "nlmpy:spatialrandom"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
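        # GDAL geotransform: (top-left x, pixel width, row rotation,
        # top-left y, column rotation, pixel height); pixel height is
        # negative because rows run from north to south.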
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.random(rows, cols, mask=mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class PlanarGradient(GeoAlgorithm):
# Define constants
DIRECTION = "DIRECTION"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Planar Gradient"
self.cmdName = "nlmpy:planargradient"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterNumber(self.DIRECTION, "Direction of the gradient (optional)", 0, None, 0))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
direction = self.getParameterValue(self.DIRECTION)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.planarGradient(rows, cols,direction,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class EdgeGradient(GeoAlgorithm):
# Define constants
DIRECTION = "DIRECTION"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Edge Gradient"
self.cmdName = "nlmpy:edgegradient"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterNumber(self.DIRECTION, "Direction of the gradient (optional)", 0, None, 0))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
direction = self.getParameterValue(self.DIRECTION)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.edgeGradient(rows, cols,direction,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class DistanceGradient(GeoAlgorithm):
# Define constants
SOURCE = "SOURCE"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Distance Gradient"
self.cmdName = "nlmpy:distancegradient"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterRaster(self.SOURCE, "Source raster layer", True))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
inputSource = self.getParameterValue(self.SOURCE)
mask = self.getParameterValue(self.MASK)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
# Source
src = gdal.Open(str(inputSource), gdalconst.GA_ReadOnly)
src_geotrans = src.GetGeoTransform()
cols = src.RasterXSize
rows = src.RasterYSize
nodata = src.GetRasterBand(1).GetNoDataValue() # keep the nodata value
array = src.GetRasterBand(1).ReadAsArray()
# Do the calc
result = nlmpy.distanceGradient(array,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,src_geotrans)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class MidpointDisplacement(GeoAlgorithm):
# Define constants
SCOR = "SCOR"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Midpoint displacement"
self.cmdName = "nlmpy:mpd"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterNumber(self.SCOR, "Level of Spatial Autocorrelation (0 - 1)", False, True,0.5))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
scor = self.getParameterValue(self.SCOR)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.mpd(rows, cols,scor,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class RandomRectangularCluster(GeoAlgorithm):
# Define constants
MINL = "MINL"
MAXL = "MAXL"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Random rectangular cluster"
self.cmdName = "nlmpy:randomreccluster"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
        self.addParameter(ParameterNumber(self.MINL, "Minimum length of each cluster", 0, None, 1))
self.addParameter(ParameterNumber(self.MAXL, "Maximum length of each cluster", 0, None, 10))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
minl = self.getParameterValue(self.MINL)
maxl = self.getParameterValue(self.MAXL)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.randomRectangularCluster(rows, cols,minl,maxl,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class RandomElementNN(GeoAlgorithm):
# Define constants
NELE = "NELE"
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Random element Nearest-neighbour"
self.cmdName = "nlmpy:randomelenn"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterNumber(self.NELE, "Number of elements randomly selected", 0, None, 3))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
nele = self.getParameterValue(self.NELE)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.randomElementNN(rows, cols,nele,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class RandomClusterNN(GeoAlgorithm):
# Define constants
NCLU = "NCLU"
NEIG = "NEIG"
w = ['4-neighbourhood','8-neighbourhood','diagonal']
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Random cluster Nearest-neighbour"
self.cmdName = "nlmpy:randomclunn"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
        self.addParameter(ParameterNumber(self.NCLU, "Proportion of elements to form cluster (0 - 1)", False, True, 0.5))
self.addParameter(ParameterSelection(self.NEIG, "Neighbourhood structure", self.w, 0))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
nclu = self.getParameterValue(self.NCLU)
what = self.w[self.getParameterValue(self.NEIG)]
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.randomClusterNN(rows, cols,nclu,what,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class LinearRescale01(GeoAlgorithm):
# Define constants
SOURCE = "SOURCE"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Linear rescale"
self.cmdName = "nlmpy:linearrescale"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterRaster(self.SOURCE, "Source raster layer", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
inputSource = self.getParameterValue(self.SOURCE)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
# Source
src = gdal.Open(str(inputSource), gdalconst.GA_ReadOnly)
src_geotrans = src.GetGeoTransform()
cols = src.RasterXSize
rows = src.RasterYSize
nodata = src.GetRasterBand(1).GetNoDataValue() # keep the nodata value
array = src.GetRasterBand(1).ReadAsArray()
# Do the calc
result = nlmpy.linearRescale01(array)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,src_geotrans)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class RandomUniformed01(GeoAlgorithm):
# Define constants
MASK = "MASK"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
EXTENT = "EXTENT"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Random uniform"
self.cmdName = "nlmpy:randomuniform"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterExtent(self.EXTENT, "Output extent",True))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
mask = self.getParameterValue(self.MASK)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
ext = self.getParameterValue(self.EXTENT)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
try:
ext = string.split(ext,",") # split
except AttributeError: # Extent was empty, raise error
raise GeoAlgorithmExecutionException("Please set an extent for the generated raster") # Processing
# Create output layer
xmin = float(ext[0])
xmax = float(ext[1])
ymin = float(ext[2])
ymax = float(ext[3])
gt = (xmin,cs,0,ymax,0,-cs)
nodata = -9999
cols = int( round( (xmax-xmin)/cs ) )
rows = int( round( (ymax-ymin)/cs ) )
# Do the calc
result = nlmpy.randomUniform01(rows, cols, mask=mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,gt)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class MeanOfCluster(GeoAlgorithm):
# Define constants
CLUSTERARRAY = "CLUSTERARRAY"
SOURCE = "SOURCE"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
CS = "CS"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Mean within cluster"
self.cmdName = "nlmpy:meanofcluster"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterRaster(self.CLUSTERARRAY, "Clustered raster layer", True))
self.addParameter(ParameterRaster(self.SOURCE, "Data raster layer", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
clusterSource = self.getParameterValue(self.CLUSTERARRAY)
inputSource = self.getParameterValue(self.SOURCE)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
# Cluster array
src = gdal.Open(str(clusterSource), gdalconst.GA_ReadOnly)
cl_array = src.GetRasterBand(1).ReadAsArray()
# Source
src = gdal.Open(str(inputSource), gdalconst.GA_ReadOnly)
src_geotrans = src.GetGeoTransform()
cols = src.RasterXSize
rows = src.RasterYSize
nodata = src.GetRasterBand(1).GetNoDataValue() # keep the nodata value
array = src.GetRasterBand(1).ReadAsArray()
# Do the calc
result = nlmpy.meanOfCluster(cl_array,array)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,src_geotrans)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
class ClassifyArray(GeoAlgorithm):
# Define constants
SOURCE = "SOURCE"
CLASSES = "CLASSES"
# Output
OUTPUT_RASTER = "OUTPUT_RASTER"
CS = "CS"
MASK = "MASK"
def getIcon(self):
return QIcon(os.path.dirname(__file__) + os.sep+"icons"+os.sep+"img_nlmpy.png")
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
self.name = "Classfiy proportional Raster"
self.cmdName = "nlmpy:classifyraster"
self.group = "Neutral landscape model (NLMpy)"
self.addParameter(ParameterRaster(self.SOURCE, "Cluster raster layer", True))
self.addParameter(ParameterNumber(self.CLASSES, "Classify proportional raster to number of classes",2, None,2))
self.addParameter(ParameterRaster(self.MASK, "Mask (optional)", True))
self.addOutput(OutputRaster(self.OUTPUT_RASTER, "Result output"))
self.addParameter(ParameterNumber(self.CS, "Output Cellsize", 10, None, 1))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
ncla = self.getParameterValue(self.CLASSES)
inputSource = self.getParameterValue(self.SOURCE)
output = self.getOutputValue(self.OUTPUT_RASTER)
cs = self.getParameterValue(self.CS)
mask = self.getParameterValue(self.MASK)
if mask != None:
src = gdal.Open(str(mask), gdalconst.GA_ReadOnly)
mask = src.GetRasterBand(1).ReadAsArray()
# Source
src = gdal.Open(str(inputSource), gdalconst.GA_ReadOnly)
src_geotrans = src.GetGeoTransform()
cols = src.RasterXSize
rows = src.RasterYSize
nodata = src.GetRasterBand(1).GetNoDataValue() # keep the nodata value
array = src.GetRasterBand(1).ReadAsArray()
# Classes
cl = range(1,ncla+1)
# Do the calc
result = nlmpy.classifyArray(array,cl,mask)
# Create output raster
func.createRaster(output,cols,rows,result,nodata,src_geotrans)
def help(self):
helppath = os.path.join(os.path.dirname(__file__), "sextante_info", self.cmdName + ".html")
if os.path.isfile(helppath):
return False, helppath
else:
return False, None
|
Martin-Jung/LecoS
|
nlmpy_sextantewrapper.py
|
Python
|
gpl-3.0
| 34,338
|
"""
Support to interface with the Emby API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.emby/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_SEEK, SUPPORT_STOP, SUPPORT_PREVIOUS_TRACK, MediaPlayerDevice,
SUPPORT_PLAY, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_API_KEY, CONF_PORT, CONF_SSL, DEVICE_DEFAULT_NAME,
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN)
from homeassistant.helpers.event import (track_utc_time_change)
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['pyemby==0.2']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MEDIA_TYPE_TRAILER = 'trailer'
DEFAULT_PORT = 8096
_LOGGER = logging.getLogger(__name__)
SUPPORT_EMBY = SUPPORT_PAUSE | SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_STOP | SUPPORT_SEEK | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default='localhost'): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
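# Example configuration.yaml entry (values are illustrative; defaults match
# the schema above):
#
# media_player:
#   - platform: emby
#     host: 192.168.1.10
#     api_key: YOUR_API_KEY
#     port: 8096
#     ssl: false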
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Emby platform."""
from pyemby.emby import EmbyRemote
_host = config.get(CONF_HOST)
_key = config.get(CONF_API_KEY)
_port = config.get(CONF_PORT)
if config.get(CONF_SSL):
_protocol = "https"
else:
_protocol = "http"
_url = '{}://{}:{}'.format(_protocol, _host, _port)
_LOGGER.debug('Setting up Emby server at: %s', _url)
embyserver = EmbyRemote(_key, _url)
emby_clients = {}
emby_sessions = {}
track_utc_time_change(hass, lambda now: update_devices(), second=30)
@Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_devices():
"""Update the devices objects."""
devices = embyserver.get_sessions()
if devices is None:
_LOGGER.error('Error listing Emby devices.')
return
new_emby_clients = []
for device in devices:
if device['DeviceId'] == embyserver.unique_id:
break
if device['DeviceId'] not in emby_clients:
_LOGGER.debug('New Emby DeviceID: %s. Adding to Clients.',
device['DeviceId'])
new_client = EmbyClient(embyserver, device, emby_sessions,
update_devices, update_sessions)
emby_clients[device['DeviceId']] = new_client
new_emby_clients.append(new_client)
else:
emby_clients[device['DeviceId']].set_device(device)
if new_emby_clients:
add_devices_callback(new_emby_clients)
@Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_sessions():
"""Update the sessions objects."""
sessions = embyserver.get_sessions()
if sessions is None:
_LOGGER.error('Error listing Emby sessions')
return
emby_sessions.clear()
for session in sessions:
emby_sessions[session['DeviceId']] = session
update_devices()
update_sessions()
class EmbyClient(MediaPlayerDevice):
"""Representation of a Emby device."""
# pylint: disable=too-many-arguments, too-many-public-methods,
def __init__(self, client, device, emby_sessions, update_devices,
update_sessions):
"""Initialize the Emby device."""
self.emby_sessions = emby_sessions
self.update_devices = update_devices
self.update_sessions = update_sessions
self.client = client
self.set_device(device)
self.media_status_last_position = None
self.media_status_received = None
def set_device(self, device):
"""Set the device property."""
self.device = device
@property
def unique_id(self):
"""Return the id of this emby client."""
return '{}.{}'.format(
self.__class__, self.device['DeviceId'])
@property
def supports_remote_control(self):
"""Return control ability."""
return self.device['SupportsRemoteControl']
@property
def name(self):
"""Return the name of the device."""
        # `.format()` always yields a truthy string, so an `or` fallback
        # here could never fire; check the raw device name instead.
        name = self.device.get('DeviceName')
        return 'emby_{}'.format(name) if name else DEVICE_DEFAULT_NAME
@property
def session(self):
"""Return the session, if any."""
if self.device['DeviceId'] not in self.emby_sessions:
return None
return self.emby_sessions[self.device['DeviceId']]
@property
def now_playing_item(self):
"""Return the currently playing item, if any."""
session = self.session
if session is not None and 'NowPlayingItem' in session:
return session['NowPlayingItem']
@property
def state(self):
"""Return the state of the device."""
session = self.session
if session:
if 'NowPlayingItem' in session:
if session['PlayState']['IsPaused']:
return STATE_PAUSED
else:
return STATE_PLAYING
else:
return STATE_IDLE
# This is nasty. Need to find a way to determine alive
else:
return STATE_OFF
return STATE_UNKNOWN
def update(self):
"""Get the latest details."""
self.update_devices(no_throttle=True)
self.update_sessions(no_throttle=True)
# Check if we should update progress
try:
position = self.session['PlayState']['PositionTicks']
except (KeyError, TypeError):
self.media_status_last_position = None
self.media_status_received = None
else:
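            # Emby reports position in 100-nanosecond ticks;
            # 10,000,000 ticks equal one second.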
position = int(position) / 10000000
if position != self.media_status_last_position:
self.media_status_last_position = position
self.media_status_received = dt_util.utcnow()
def play_percent(self):
"""Return current media percent complete."""
if self.now_playing_item['RunTimeTicks'] and \
self.session['PlayState']['PositionTicks']:
try:
return int(self.session['PlayState']['PositionTicks']) / \
int(self.now_playing_item['RunTimeTicks']) * 100
except KeyError:
return 0
else:
return 0
@property
def app_name(self):
"""Return current user as app_name."""
# Ideally the media_player object would have a user property.
try:
return self.device['UserName']
except KeyError:
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self.now_playing_item is not None:
try:
return self.now_playing_item['Id']
except KeyError:
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
if self.now_playing_item is None:
return None
try:
media_type = self.now_playing_item['Type']
if media_type == 'Episode':
return MEDIA_TYPE_TVSHOW
elif media_type == 'Movie':
return MEDIA_TYPE_VIDEO
elif media_type == 'Trailer':
return MEDIA_TYPE_TRAILER
return None
except KeyError:
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self.now_playing_item and self.media_content_type:
try:
return int(self.now_playing_item['RunTimeTicks']) / 10000000
except KeyError:
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self.media_status_last_position
@property
def media_position_updated_at(self):
"""
When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self.media_status_received
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.now_playing_item is not None:
try:
return self.client.get_image(
self.now_playing_item['ThumbItemId'], 'Thumb', 0)
except KeyError:
try:
return self.client.get_image(
self.now_playing_item[
'PrimaryImageItemId'], 'Primary', 0)
except KeyError:
return None
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
if self.now_playing_item is not None:
return self.now_playing_item['Name']
@property
def media_season(self):
"""Season of curent playing media (TV Show only)."""
if self.now_playing_item is not None and \
'ParentIndexNumber' in self.now_playing_item:
return self.now_playing_item['ParentIndexNumber']
@property
def media_series_title(self):
"""The title of the series of current playing media (TV Show only)."""
if self.now_playing_item is not None and \
'SeriesName' in self.now_playing_item:
return self.now_playing_item['SeriesName']
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
if self.now_playing_item is not None and \
'IndexNumber' in self.now_playing_item:
return self.now_playing_item['IndexNumber']
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.supports_remote_control:
return SUPPORT_EMBY
else:
return None
def media_play(self):
"""Send play command."""
if self.supports_remote_control:
self.client.play(self.session)
def media_pause(self):
"""Send pause command."""
if self.supports_remote_control:
self.client.pause(self.session)
def media_next_track(self):
"""Send next track command."""
self.client.next_track(self.session)
def media_previous_track(self):
"""Send previous track command."""
self.client.previous_track(self.session)
|
kyvinh/home-assistant
|
homeassistant/components/media_player/emby.py
|
Python
|
apache-2.0
| 11,042
|
def adagrad(opfunc, x, config, state=None):
"""ADAGRAD implementation
ARGS:
- `opfunc` : a function that takes a single input (X), the point of
evaluation, and returns f(X) and df/dX
- `x` : the initial point
    - `config` : a dict of configuration parameters for the optimizer
    - `config['learningRate']` : learning rate
    - `config['learningRateDecay']` : learning rate decay
    - `config['weightDecay']` : scalar that controls weight decay
    - `state` : a dict describing the state of the optimizer; after each
          call the state is modified
    - `state['paramVariance']` : vector of temporal variances of parameters
RETURN:
- `x` : the new x vector
    - `f(x)` : the value of the optimized function, evaluated before the update
"""
# (0) get/update state
if config is None and state is None:
raise ValueError("adagrad requires a dictionary to retain state between iterations")
state = state if state is not None else config
lr = config.get('learningRate', 1e-3)
lrd = config.get('learningRateDecay', 0)
wd = config.get('weightDecay', 0)
state['evalCounter'] = state.get('evalCounter', 0)
# (1) evaluate f(x) and df/dx
fx, dfdx = opfunc(x)
# (2) weight decay with a single parameter
if wd != 0:
dfdx.add_(wd, x)
# (3) learning rate decay (annealing)
clr = lr / (1 + state['evalCounter'] * lrd)
# (4) parameter update with single or individual learning rates
if 'paramVariance' not in state:
state['paramVariance'] = x.new().resize_as_(dfdx).zero_()
state['paramStd'] = x.new().resize_as_(dfdx)
state['paramVariance'].addcmul_(1, dfdx, dfdx)
state['paramStd'].resize_as_(state['paramVariance']).copy_(state['paramVariance']).sqrt_()
x.addcdiv_(-clr, dfdx, state['paramStd'].add_(1e-10))
# (5) update evaluation counter
state['evalCounter'] += 1
# return x*, f(x) before optimization
return x, fx
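# A minimal usage sketch (illustrative, not part of the original module);
# `x0` and `closure` are hypothetical names, and the legacy in-place tensor
# methods used above assume an old torch version:
#
#   import torch
#   x0 = torch.zeros(10)
#   def closure(x):
#       loss = (x - 1).pow(2).sum()
#       grad = 2 * (x - 1)
#       return loss, grad
#   config = {'learningRate': 0.1}
#   for _ in range(100):
#       x0, fx = adagrad(closure, x0, config)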
|
RPGOne/Skynet
|
pytorch-master/torch/legacy/optim/adagrad.py
|
Python
|
bsd-3-clause
| 1,854
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import exception
from heat.engine import stack_lock
from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object
from heat.tests import common
from heat.tests import utils
class StackLockTest(common.HeatTestCase):
def setUp(self):
super(StackLockTest, self).setUp()
self.context = utils.dummy_context()
self.stack_id = "aae01f2d-52ae-47ac-8a0d-3fde3d220fea"
self.engine_id = stack_lock.StackLock.generate_engine_id()
stack = mock.MagicMock()
stack.id = self.stack_id
stack.name = "test_stack"
stack.action = "CREATE"
self.patchobject(stack_object.Stack, 'get_by_id',
return_value=stack)
class TestThreadLockException(Exception):
pass
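    # The tests below exercise StackLock.acquire(): create() returns None on
    # success, or the holder's engine id on conflict; if the holding engine
    # is dead the lock is stolen, with one retry before giving up and
    # raising ActionInProgress.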
def test_successful_acquire_new_lock(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
slock.acquire()
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_current_engine(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value=self.engine_id)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.assertRaises(exception.ActionInProgress, slock.acquire)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_successful_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(self.stack_id, 'fake-engine-id',
self.engine_id)
def test_failed_acquire_existing_lock_engine_alive(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=True)
self.assertRaises(exception.ActionInProgress, slock.acquire)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
def test_failed_acquire_existing_lock_engine_dead(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value='fake-engine-id2')
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
mock_create.assert_called_once_with(self.stack_id, self.engine_id)
mock_steal.assert_called_once_with(self.stack_id, 'fake-engine-id',
self.engine_id)
def test_successful_acquire_with_retry(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
side_effect=[True, None])
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
slock.acquire()
mock_create.assert_has_calls(
[mock.call(self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.stack_id, 'fake-engine-id', self.engine_id)] * 2)
def test_failed_acquire_one_retry_only(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
'create',
return_value='fake-engine-id')
mock_steal = self.patchobject(stack_lock_object.StackLock,
'steal',
return_value=True)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
self.patchobject(slock, 'engine_alive', return_value=False)
self.assertRaises(exception.ActionInProgress, slock.acquire)
mock_create.assert_has_calls(
[mock.call(self.stack_id, self.engine_id)] * 2)
mock_steal.assert_has_calls(
[mock.call(self.stack_id, 'fake-engine-id', self.engine_id)] * 2)
def test_thread_lock_context_mgr_exception_acquire_success(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_thread_lock_context_mgr_exception_acquire_fail(self):
stack_lock_object.StackLock.create = mock.Mock(
return_value=self.engine_id)
stack_lock_object.StackLock.release = mock.Mock()
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise exception.ActionInProgress
self.assertRaises(exception.ActionInProgress, check_thread_lock)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_thread_lock_context_mgr_no_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
with slock.thread_lock():
self.assertEqual(1, stack_lock_object.StackLock.create.call_count)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_try_thread_lock_context_mgr_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.try_thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertEqual(1, stack_lock_object.StackLock.release.call_count)
def test_try_thread_lock_context_mgr_no_exception(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=None)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
with slock.try_thread_lock():
self.assertEqual(1, stack_lock_object.StackLock.create.call_count)
self.assertFalse(stack_lock_object.StackLock.release.called)
def test_try_thread_lock_context_mgr_existing_lock(self):
stack_lock_object.StackLock.create = mock.Mock(return_value=1234)
stack_lock_object.StackLock.release = mock.Mock(return_value=None)
slock = stack_lock.StackLock(self.context, self.stack_id,
self.engine_id)
def check_thread_lock():
with slock.try_thread_lock():
self.assertEqual(1,
stack_lock_object.StackLock.create.call_count)
raise self.TestThreadLockException
self.assertRaises(self.TestThreadLockException, check_thread_lock)
self.assertFalse(stack_lock_object.StackLock.release.called)
|
pratikmallya/heat
|
heat/tests/test_stack_lock.py
|
Python
|
apache-2.0
| 10,326
|
from __future__ import unicode_literals
from io import StringIO
import logging
import os
import shutil
import tempfile
from django.test import TestCase
from django.test.utils import override_settings
import mock
from archives.tasks import (
archive_artifact_from_jenkins, process_build_artifacts,
link_artifact_in_archive, generate_checksums)
from archives.models import Archive, ArchiveArtifact
from archives.transports import Transport, LocalTransport
from jenkins.tests.factories import ArtifactFactory, BuildFactory
from jenkins.models import Build
from projects.helpers import build_project
from projects.tasks import process_build_dependencies
from projects.models import ProjectDependency, ProjectBuildDependency
from projects.tests.factories import DependencyFactory, ProjectFactory
from .factories import ArchiveFactory
class LoggingTransport(Transport):
"""
    Test transport that just records the calls the archiver
    code makes.
"""
def __init__(self, *args, **kwargs):
super(LoggingTransport, self).__init__(*args, **kwargs)
self.log = []
def start(self):
self.log.append("START")
def end(self):
self.log.append("END")
def archive_url(self, url, path, username, password):
self.log.append("%s -> %s %s:%s" % (url, path, username, password))
return 0
def generate_checksums(self, archived_artifact):
self.log.append(
"Checksums generated for %s" % archived_artifact)
def link_to_current(self, path):
self.log.append(
"Make %s current" % path)
def link_filename_to_filename(self, source, destination):
self.log.append(
"Link %s to %s" % (source, destination))
class LocalArchiveTestBase(TestCase):
def setUp(self):
self.basedir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.basedir)
class ArchiveArtifactFromJenkinsTaskTest(LocalArchiveTestBase):
def test_archive_artifact_from_jenkins(self):
"""
        archive_artifact_from_jenkins should get a transport, then call
        start, end and archive_artifact on the transport, copying the
        file to the correct storage.
"""
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir)
dependency = DependencyFactory.create()
build = BuildFactory.create(job=dependency.job)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
items = archive.add_build(artifact.build)
fakefile = StringIO(u"Artifact from Jenkins")
with mock.patch("archives.transports.urllib2") as urllib2_mock:
urllib2_mock.urlopen.return_value = fakefile
archive_artifact_from_jenkins(items[artifact][0].pk)
[item] = list(archive.get_archived_artifacts_for_build(build))
filename = os.path.join(self.basedir, item.archived_path)
self.assertEqual(file(filename).read(), "Artifact from Jenkins")
self.assertEqual(21, item.archived_size)
def test_archive_artifact_from_finalized_dependency_build(self):
"""
        archive_artifact_from_jenkins should get a transport, then call
        start, end and archive_artifact on the transport, copying the
        file to the correct storage.
"""
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir)
dependency = DependencyFactory.create()
build = BuildFactory.create(job=dependency.job)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
[item] = archive.add_build(artifact.build)[artifact]
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
archive_artifact_from_jenkins(item.pk)
self.assertEqual(
["START",
"%s -> %s root:testing" % (artifact.url, item.archived_path),
"Make %s current" % item.archived_path,
"END"],
transport.log)
def test_archive_artifact_from_finalized_projectbuild(self):
"""
If the build is complete, and the item being archived is in a FINALIZED
ProjectBuild, it should use the transport to set the current directory
correctly.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key,
phase=Build.FINALIZED)
ProjectBuildDependency.objects.create(
build=build, projectbuild=projectbuild, dependency=dependency)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
item = [x for x in archive.add_build(artifact.build)[artifact]
if x.projectbuild_dependency][0]
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
archive_artifact_from_jenkins(item.pk)
self.assertEqual(
["START",
"%s -> %s root:testing" % (artifact.url, item.archived_path),
"Make %s current" % item.archived_path,
"END"],
transport.log)
def test_archive_artifact_from_non_finalized_projectbuild(self):
"""
        If the build is complete but the item being archived is not in a
        FINALIZED ProjectBuild, the transport should not link the archived
        path as current.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency1.job, build_id=projectbuild.build_key,
phase=Build.FINALIZED)
ProjectBuildDependency.objects.create(
build=build, projectbuild=projectbuild, dependency=dependency1)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
item = [x for x in archive.add_build(artifact.build)[artifact]
if x.projectbuild_dependency][0]
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
archive_artifact_from_jenkins(item.pk)
self.assertEqual(
["START",
"%s -> %s root:testing" % (artifact.url, item.archived_path),
"END"],
transport.log)
def test_archive_artifact_from_jenkins_transport_lifecycle(self):
"""
archive_artifact_from_jenkins should get a transport, and copy the file
to the correct storage.
"""
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir)
dependency = DependencyFactory.create()
build = BuildFactory.create(job=dependency.job)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
archive.add_build(artifact.build)
[item] = list(archive.get_archived_artifacts_for_build(build))
self.assertIsNone(item.archived_at)
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
archive_artifact_from_jenkins(item.pk)
[item] = list(archive.get_archived_artifacts_for_build(build))
self.assertEqual(
["START",
"%s -> %s root:testing" % (artifact.url, item.archived_path),
"Make %s current" % item.archived_path,
"END"],
transport.log)
self.assertIsNotNone(item.archived_at)
class GenerateChecksumsTaskTest(TestCase):
def setUp(self):
self.basedir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.basedir)
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_generate_checksums(self):
"""
generate_checksums should call the generate_checksums method
on the transport from the archive with the build to generate
the checksums for.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key)
projectbuild_dependency = ProjectBuildDependency.objects.create(
build=build, projectbuild=projectbuild, dependency=dependency)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
archived_artifact = ArchiveArtifact.objects.create(
build=build, archive=archive, artifact=artifact,
archived_path="/srv/builds/200101.01/artifact_filename",
projectbuild_dependency=projectbuild_dependency)
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
generate_checksums(build.pk)
self.assertEqual(
["START", "Checksums generated for %s" % archived_artifact, "END"],
transport.log)
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_generate_checksums_no_transport(self):
"""
generate_checksums should call the generate_checksums method
on the transport from the archive with the build to generate
the checksums for. If there is no default archive, a checksum
cannot be calculated and there should be an early exit.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key)
ProjectBuildDependency.objects.create(
build=build, projectbuild=projectbuild, dependency=dependency)
ArtifactFactory.create(build=build, filename="testing/testing.txt")
# No archive defined
transport = LoggingTransport(None)
# Mock the logger
with mock.patch.object(logging, "info", return_value=None) as mock_log:
return_value = generate_checksums(build.pk)
self.assertEqual([], transport.log)
self.assertEqual(build.pk, return_value)
mock_log.assert_called_once_with(
"No default archiver - no checksum to generate")
class ProcessBuildArtifactsTaskTest(TestCase):
def setUp(self):
self.basedir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.basedir)
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_process_build_artifacts(self):
"""
process_build_artifacts is chained from the Jenkins postbuild
processing; it should arrange for the artifacts for the provided build
to be archived in the default archive.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key)
ArtifactFactory.create(
build=build, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True,
policy="cdimage")
with mock.patch("archives.transports.urllib2") as urllib2_mock:
urllib2_mock.urlopen.side_effect = lambda x: StringIO(
u"Artifact from Jenkins")
process_build_artifacts(build.pk)
[item1, item2] = list(archive.get_archived_artifacts_for_build(build))
filename = os.path.join(self.basedir, item1.archived_path)
self.assertEqual(open(filename).read(), "Artifact from Jenkins")
filename = os.path.join(self.basedir, item2.archived_path)
self.assertEqual(open(filename).read(), "Artifact from Jenkins")
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_process_build_artifacts_with_no_default_archive(self):
"""
If we have no default archive, we should log the fact that we can't
automatically archive artifacts.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key)
ArtifactFactory.create(
build=build, filename="testing/testing.txt")
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=False)
with mock.patch("archives.tasks.logging") as mock_logging:
result = process_build_artifacts.delay(build.pk)
# We must return the build.pk for further chained calls to work.
self.assertEqual(build.pk, result.get())
mock_logging.assert_has_calls([
mock.call.info(
"Processing build artifacts from build %s %d",
build, build.number),
mock.call.info(
"No default archiver - build not automatically archived.")
])
self.assertEqual(
[],
list(archive.get_archived_artifacts_for_build(build)))
@override_settings(CELERY_ALWAYS_EAGER=True)
def test_process_build_artifacts_with_multiple_artifacts(self):
"""
All the artifacts should be individually linked.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
projectbuild = build_project(project, queue_build=False)
build = BuildFactory.create(
job=dependency.job, build_id=projectbuild.build_key)
ArtifactFactory.create(
build=build, filename="testing/testing1.txt")
ArtifactFactory.create(
build=build, filename="testing/testing2.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True,
policy="cdimage")
with mock.patch("archives.transports.urllib2") as urllib2_mock:
urllib2_mock.urlopen.side_effect = lambda x: StringIO(
u"Artifact %s")
with mock.patch(
"archives.tasks.archive_artifact_from_jenkins"
) as archive_task:
with mock.patch(
"archives.tasks.link_artifact_in_archive"
) as link_task:
process_build_artifacts(build.pk)
[item1, item2, item3, item4] = list(
archive.get_archived_artifacts_for_build(build).order_by(
"artifact"))
self.assertEqual(
[mock.call(item4.pk), mock.call(item2.pk)],
archive_task.si.call_args_list)
self.assertEqual(
[mock.call(item4.pk, item3.pk), mock.call(item2.pk, item1.pk)],
link_task.si.call_args_list)
class LinkArtifactInArchiveTaskTest(LocalArchiveTestBase):
def test_link_artifact_in_archive(self):
"""
The link_artifact_in_archive task should use the transport to link the
specified artifacts.
"""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
build = BuildFactory.create(job=dependency.job, phase=Build.FINALIZED)
artifact = ArtifactFactory.create(
build=build, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
[item1, item2] = archive.add_build(artifact.build)[artifact]
item1.archived_size = 1000
item1.save()
transport = mock.Mock(spec=LocalTransport)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
link_artifact_in_archive(item1.pk, item2.pk)
transport.link_filename_to_filename.assert_called_once_with(
item1.archived_path, item2.archived_path)
transport.link_to_current.assert_called_once_with(item2.archived_path)
item1 = ArchiveArtifact.objects.get(pk=item1.pk)
self.assertEqual(1000, item1.archived_size)
def test_archive_artifact_from_non_finalized_projectbuild(self):
"""
If the projectbuild is only partially built (one dependency's build is
not yet FINALIZED), link_artifact_in_archive should link the artifacts
but must not make the target the current build.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
projectbuild = build_project(project, queue_build=False)
BuildFactory.create(
job=dependency1.job, build_id=projectbuild.build_key,
phase=Build.STARTED)
build2 = BuildFactory.create(
job=dependency2.job, build_id=projectbuild.build_key,
phase=Build.FINALIZED)
artifact = ArtifactFactory.create(
build=build2, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build2.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
[item1, item2] = archive.add_build(artifact.build)[artifact]
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
link_artifact_in_archive(item1.pk, item2.pk)
# As this projectbuild is only partially built, we shouldn't make this
# the current build.
self.assertEqual(
["START",
"Link %s to %s" % (item1.archived_path, item2.archived_path),
"END"],
transport.log)
def test_archive_artifact_from_finalized_projectbuild(self):
"""
If the build is complete, and the item being archived is in a FINALIZED
ProjectBuild, it should use the transport to set the current directory
correctly.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
projectbuild = build_project(project, queue_build=False)
build1 = BuildFactory.create(
job=dependency1.job, build_id=projectbuild.build_key,
phase=Build.FINALIZED)
build2 = BuildFactory.create(
job=dependency2.job, build_id=projectbuild.build_key,
phase=Build.FINALIZED)
artifact = ArtifactFactory.create(
build=build2, filename="testing/testing.txt")
# We need to ensure that the artifacts are all connected up.
process_build_dependencies(build1.pk)
process_build_dependencies(build2.pk)
archive = ArchiveFactory.create(
transport="local", basedir=self.basedir, default=True)
[item1, item2] = archive.add_build(artifact.build)[artifact]
transport = LoggingTransport(archive)
with mock.patch.object(
Archive, "get_transport", return_value=transport):
link_artifact_in_archive(item1.pk, item2.pk)
# Both builds are complete, we expect this to be made the current
# build.
self.assertEqual(
["START",
"Link %s to %s" % (item1.archived_path, item2.archived_path),
"Make %s current" % item2.archived_path,
"END"],
transport.log)
| caio1982/capomastro | archives/tests/test_tasks.py | Python | mit | 22,115 |
'Hello ' + models['mvc'].encoders.html(models['name'])
| dmaidaniuk/ozark | test/jsr223/src/main/webapp/WEB-INF/views/index.py | Python | apache-2.0 | 54 |
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cpplint.py."""
# TODO(unknown): Add a good test that tests UpdateIncludeState.
import codecs
import os
import random
import re
import sys
import unittest
import cpplint
# This class works as an error collector and replaces cpplint.Error
# function for the unit tests. We also verify each category we see
# is in cpplint._ERROR_CATEGORIES, to help keep that list up to date.
class ErrorCollector(object):
# These are global, covering all categories ever seen.
_ERROR_CATEGORIES = cpplint._ERROR_CATEGORIES
_SEEN_ERROR_CATEGORIES = {}
def __init__(self, assert_fn):
"""assert_fn: a function to call when we notice a problem."""
self._assert_fn = assert_fn
self._errors = []
cpplint.ResetNolintSuppressions()
def __call__(self, unused_filename, linenum,
category, confidence, message):
self._assert_fn(category in self._ERROR_CATEGORIES,
'Message "%s" has category "%s",'
' which is not in _ERROR_CATEGORIES' % (message, category))
self._SEEN_ERROR_CATEGORIES[category] = 1
if cpplint._ShouldPrintError(category, confidence, linenum):
self._errors.append('%s [%s] [%d]' % (message, category, confidence))
def Results(self):
if len(self._errors) < 2:
return ''.join(self._errors) # Most tests expect to have a string.
else:
return self._errors # Let's give a list if there is more than one.
def ResultList(self):
return self._errors
def VerifyAllCategoriesAreSeen(self):
"""Fails if there's a category in _ERROR_CATEGORIES~_SEEN_ERROR_CATEGORIES.
This should only be called after all tests are run, so
_SEEN_ERROR_CATEGORIES has had a chance to fully populate. Since
this isn't called from within the normal unittest framework, we
can't use the normal unittest assert macros. Instead we just exit
when we see an error. Good thing this test is always run last!
"""
for category in self._ERROR_CATEGORIES:
if category not in self._SEEN_ERROR_CATEGORIES:
sys.exit('FATAL ERROR: There are no tests for category "%s"' % category)
def RemoveIfPresent(self, substr):
for (index, error) in enumerate(self._errors):
if error.find(substr) != -1:
self._errors = self._errors[0:index] + self._errors[(index + 1):]
break
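# A hedged sketch, not part of the original file: ErrorCollector is handed to
# cpplint in place of its normal error function, and Results() then yields a
# string for zero or one errors, or a list for several. The input lines below
# are a hypothetical example.
def _example_error_collector_usage():
    def assert_fn(condition, message):
        # Minimal stand-in for unittest's assert_; fails on unknown categories.
        if not condition:
            raise AssertionError(message)
    collector = ErrorCollector(assert_fn)
    cpplint.ProcessFileData('example.cc', 'cc',
                            ['// Copyright 2014 Your Company.',
                             'int a = (int) 1.0;',
                             ''],
                            collector)
    return collector.Results()  # e.g. a single readability/casting warning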
# This class is a lame mock of codecs. We do not verify filename, mode, or
# encoding, but for the current use case it is not needed.
class MockIo(object):
def __init__(self, mock_file):
self.mock_file = mock_file
def open(self, # pylint: disable-msg=C6409
unused_filename, unused_mode, unused_encoding, _):
return self.mock_file
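# A hedged usage sketch, not part of the original file: MockIo stands in for
# the codecs module so include-what-you-use checks read canned header lines
# instead of touching the filesystem; filename, mode and encoding are ignored.
# The header contents here are a hypothetical example.
def _example_mock_io():
    io = MockIo(['#include <set>'])
    return io.open('blah/a.h', 'r', 'utf8', None)  # returns ['#include <set>']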
class CpplintTestBase(unittest.TestCase):
"""Provides some useful helper functions for cpplint tests."""
def setUp(self):
# Save os.path.abspath (called in the FileInfo class) so tests can stub it out; tearDown restores it.
self.os_path_abspath_orig = os.path.abspath
def tearDown(self):
os.path.abspath = self.os_path_abspath_orig
# Perform lint on single line of input and return the error message.
def PerformSingleLineLint(self, code):
error_collector = ErrorCollector(self.assert_)
lines = code.split('\n')
cpplint.RemoveMultiLineComments('foo.h', lines, error_collector)
clean_lines = cpplint.CleansedLines(lines)
include_state = cpplint._IncludeState()
function_state = cpplint._FunctionState()
nesting_state = cpplint.NestingState()
cpplint.ProcessLine('foo.cc', 'cc', clean_lines, 0,
include_state, function_state,
nesting_state, error_collector)
# Single-line lint tests are allowed to fail the 'unlintable function'
# check.
error_collector.RemoveIfPresent(
'Lint failed to find start of function body.')
return error_collector.Results()
# Perform lint over multiple lines and return the error message.
def PerformMultiLineLint(self, code):
error_collector = ErrorCollector(self.assert_)
lines = code.split('\n')
cpplint.RemoveMultiLineComments('foo.h', lines, error_collector)
lines = cpplint.CleansedLines(lines)
nesting_state = cpplint.NestingState()
for i in xrange(lines.NumLines()):
nesting_state.Update('foo.h', lines, i, error_collector)
cpplint.CheckStyle('foo.h', lines, i, 'h', nesting_state,
error_collector)
cpplint.CheckForNonStandardConstructs('foo.h', lines, i,
nesting_state, error_collector)
nesting_state.CheckCompletedBlocks('foo.h', error_collector)
return error_collector.Results()
# Similar to PerformMultiLineLint, but calls CheckLanguage instead of
# CheckForNonStandardConstructs
def PerformLanguageRulesCheck(self, file_name, code):
error_collector = ErrorCollector(self.assert_)
include_state = cpplint._IncludeState()
nesting_state = cpplint.NestingState()
lines = code.split('\n')
cpplint.RemoveMultiLineComments(file_name, lines, error_collector)
lines = cpplint.CleansedLines(lines)
ext = file_name[file_name.rfind('.') + 1:]
for i in xrange(lines.NumLines()):
cpplint.CheckLanguage(file_name, lines, i, ext, include_state,
nesting_state, error_collector)
return error_collector.Results()
def PerformFunctionLengthsCheck(self, code):
"""Perform Lint function length check on block of code and return warnings.
Builds up an array of lines corresponding to the code and strips comments
using cpplint functions.
Establishes an error collector and invokes the function length checking
function following cpplint's pattern.
Args:
code: C++ source code expected to generate a warning message.
Returns:
The accumulated errors.
"""
file_name = 'foo.cc'
error_collector = ErrorCollector(self.assert_)
function_state = cpplint._FunctionState()
lines = code.split('\n')
cpplint.RemoveMultiLineComments(file_name, lines, error_collector)
lines = cpplint.CleansedLines(lines)
for i in xrange(lines.NumLines()):
cpplint.CheckForFunctionLengths(file_name, lines, i,
function_state, error_collector)
return error_collector.Results()
def PerformIncludeWhatYouUse(self, code, filename='foo.h', io=codecs):
# First, build up the include state.
error_collector = ErrorCollector(self.assert_)
include_state = cpplint._IncludeState()
nesting_state = cpplint.NestingState()
lines = code.split('\n')
cpplint.RemoveMultiLineComments(filename, lines, error_collector)
lines = cpplint.CleansedLines(lines)
for i in xrange(lines.NumLines()):
cpplint.CheckLanguage(filename, lines, i, '.h', include_state,
nesting_state, error_collector)
# We could clear the error_collector here, but this should
# also be fine, since our IncludeWhatYouUse unittests do not
# have language problems.
# Second, look for missing includes.
cpplint.CheckForIncludeWhatYouUse(filename, lines, include_state,
error_collector, io)
return error_collector.Results()
# Perform lint and compare the error message with "expected_message".
def TestLint(self, code, expected_message):
self.assertEquals(expected_message, self.PerformSingleLineLint(code))
def TestMultiLineLint(self, code, expected_message):
self.assertEquals(expected_message, self.PerformMultiLineLint(code))
def TestMultiLineLintRE(self, code, expected_message_re):
message = self.PerformMultiLineLint(code)
if not re.search(expected_message_re, message):
self.fail('Message was:\n' + message + 'Expected match to "' +
expected_message_re + '"')
def TestLanguageRulesCheck(self, file_name, code, expected_message):
self.assertEquals(expected_message,
self.PerformLanguageRulesCheck(file_name, code))
def TestIncludeWhatYouUse(self, code, expected_message):
self.assertEquals(expected_message,
self.PerformIncludeWhatYouUse(code))
def TestBlankLinesCheck(self, lines, start_errors, end_errors):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc', lines, error_collector)
self.assertEquals(
start_errors,
error_collector.Results().count(
'Redundant blank line at the start of a code block '
'should be deleted. [whitespace/blank_line] [2]'))
self.assertEquals(
end_errors,
error_collector.Results().count(
'Redundant blank line at the end of a code block '
'should be deleted. [whitespace/blank_line] [3]'))
class CpplintTest(CpplintTestBase):
def GetNamespaceResults(self, lines):
error_collector = ErrorCollector(self.assert_)
cpplint.RemoveMultiLineComments('foo.h', lines, error_collector)
lines = cpplint.CleansedLines(lines)
nesting_state = cpplint.NestingState()
for i in xrange(lines.NumLines()):
nesting_state.Update('foo.h', lines, i, error_collector)
cpplint.CheckForNamespaceIndentation('foo.h', nesting_state,
lines, i, error_collector)
return error_collector.Results()
def testForwardDeclarationNameSpaceIndentation(self):
lines = ['namespace Test {',
' class ForwardDeclaration;',
'} // namespace Test']
results = self.GetNamespaceResults(lines)
self.assertEquals(results, 'Do not indent within a namespace '
' [runtime/indentation_namespace] [4]')
def testNameSpaceIndentationForClass(self):
lines = ['namespace Test {',
'void foo() { }',
' class Test {',
' };',
'} // namespace Test']
results = self.GetNamespaceResults(lines)
self.assertEquals(results, 'Do not indent within a namespace '
' [runtime/indentation_namespace] [4]')
def testNameSpaceIndentationNoError(self):
lines = ['namespace Test {',
'void foo() { }',
'} // namespace Test']
results = self.GetNamespaceResults(lines)
self.assertEquals(results, '')
def testWhitespaceBeforeNamespace(self):
lines = [' namespace Test {',
' void foo() { }',
' } // namespace Test']
results = self.GetNamespaceResults(lines)
self.assertEquals(results, '')
def testFalsePositivesNoError(self):
lines = ['namespace Test {',
'struct OuterClass {',
' struct NoFalsePositivesHere;',
' struct NoFalsePositivesHere member_variable;',
'};',
'} // namespace Test']
results = self.GetNamespaceResults(lines)
self.assertEquals(results, '')
# Test get line width.
def testGetLineWidth(self):
self.assertEquals(0, cpplint.GetLineWidth(''))
self.assertEquals(10, cpplint.GetLineWidth(u'x' * 10))
self.assertEquals(16, cpplint.GetLineWidth(u'都|道|府|県|支庁'))
def testGetTextInside(self):
self.assertEquals('', cpplint._GetTextInside('fun()', r'fun\('))
self.assertEquals('x, y', cpplint._GetTextInside('f(x, y)', r'f\('))
self.assertEquals('a(), b(c())', cpplint._GetTextInside(
'printf(a(), b(c()))', r'printf\('))
self.assertEquals('x, y{}', cpplint._GetTextInside('f[x, y{}]', r'f\['))
self.assertEquals(None, cpplint._GetTextInside('f[a, b(}]', r'f\['))
self.assertEquals(None, cpplint._GetTextInside('f[x, y]', r'f\('))
self.assertEquals('y, h(z, (a + b))', cpplint._GetTextInside(
'f(x, g(y, h(z, (a + b))))', r'g\('))
self.assertEquals('f(f(x))', cpplint._GetTextInside('f(f(f(x)))', r'f\('))
# Supports multiple lines.
self.assertEquals('\n return loop(x);\n',
cpplint._GetTextInside(
'int loop(int x) {\n return loop(x);\n}\n', r'\{'))
# '^' matches the beginning of each line.
self.assertEquals('x, y',
cpplint._GetTextInside(
'#include "inl.h" // skip #define\n'
'#define A2(x, y) a_inl_(x, y, __LINE__)\n'
'#define A(x) a_inl_(x, "", __LINE__)\n',
r'^\s*#define\s*\w+\('))
def testFindNextMultiLineCommentStart(self):
self.assertEquals(1, cpplint.FindNextMultiLineCommentStart([''], 0))
lines = ['a', 'b', '/* c']
self.assertEquals(2, cpplint.FindNextMultiLineCommentStart(lines, 0))
lines = ['char a[] = "/*";'] # not recognized as comment.
self.assertEquals(1, cpplint.FindNextMultiLineCommentStart(lines, 0))
def testFindNextMultiLineCommentEnd(self):
self.assertEquals(1, cpplint.FindNextMultiLineCommentEnd([''], 0))
lines = ['a', 'b', ' c */']
self.assertEquals(2, cpplint.FindNextMultiLineCommentEnd(lines, 0))
def testRemoveMultiLineCommentsFromRange(self):
lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b']
cpplint.RemoveMultiLineCommentsFromRange(lines, 1, 4)
self.assertEquals(['a', '/**/', '/**/', '/**/', 'b'], lines)
def testSpacesAtEndOfLine(self):
self.TestLint(
'// Hello there ',
'Line ends in whitespace. Consider deleting these extra spaces.'
' [whitespace/end_of_line] [4]')
# Test line length check.
def testLineLengthCheck(self):
self.TestLint(
'// Hello',
'')
self.TestLint(
'// x' + ' x' * 40,
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestLint(
'// x' + ' x' * 50,
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestLint(
'// //some/path/to/f' + ('i' * 100) + 'le',
'')
self.TestLint(
'// //some/path/to/f' + ('i' * 50) + 'le and some comments',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestLint(
'// http://g' + ('o' * 100) + 'gle.com/',
'')
self.TestLint(
'// https://g' + ('o' * 100) + 'gle.com/',
'')
self.TestLint(
'// https://g' + ('o' * 60) + 'gle.com/ and some comments',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestLint(
'// Read https://g' + ('o' * 60) + 'gle.com/',
'')
self.TestLint(
'// $Id: g' + ('o' * 80) + 'gle.cc#1 $',
'')
self.TestLint(
'// $Id: g' + ('o' * 80) + 'gle.cc#1',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestMultiLineLint(
'static const char kCStr[] = "g' + ('o' * 50) + 'gle";\n',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestMultiLineLint(
'static const char kRawStr[] = R"(g' + ('o' * 50) + 'gle)";\n',
'') # no warning because raw string content is elided
self.TestMultiLineLint(
'static const char kMultiLineRawStr[] = R"(\n'
'g' + ('o' * 80) + 'gle\n'
')";',
'')
self.TestMultiLineLint(
'static const char kL' + ('o' * 50) + 'ngIdentifier[] = R"()";\n',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
# Test error suppression annotations.
def testErrorSuppression(self):
# Two errors on same line:
self.TestLint(
'long a = (int64) 65;',
['Using C-style cast. Use static_cast<int64>(...) instead'
' [readability/casting] [4]',
'Use int16/int64/etc, rather than the C type long'
' [runtime/int] [4]',
])
# One category of error suppressed:
self.TestLint(
'long a = (int64) 65; // NOLINT(runtime/int)',
'Using C-style cast. Use static_cast<int64>(...) instead'
' [readability/casting] [4]')
# All categories suppressed: (two aliases)
self.TestLint('long a = (int64) 65; // NOLINT', '')
self.TestLint('long a = (int64) 65; // NOLINT(*)', '')
# Malformed NOLINT directive:
self.TestLint(
'long a = 65; // NOLINT(foo)',
['Unknown NOLINT error category: foo'
' [readability/nolint] [5]',
'Use int16/int64/etc, rather than the C type long [runtime/int] [4]',
])
# Irrelevant NOLINT directive has no effect:
self.TestLint(
'long a = 65; // NOLINT(readability/casting)',
'Use int16/int64/etc, rather than the C type long'
' [runtime/int] [4]')
# NOLINTNEXTLINE silences the warning for the next line instead of the current line
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('test.cc', 'cc',
['// Copyright 2014 Your Company.',
'// NOLINTNEXTLINE(whitespace/line_length)',
'// ./command' + (' -verbose' * 80),
''],
error_collector)
self.assertEquals('', error_collector.Results())
# LINT_C_FILE silences cast warnings for entire file.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('test.h', 'h',
['// Copyright 2014 Your Company.',
'// NOLINT(build/header_guard)',
'int64 a = (uint64) 65;',
'// LINT_C_FILE',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# Vim modes silence cast warnings for entire file.
for modeline in ['vi:filetype=c',
'vi:sw=8 filetype=c',
'vi:sw=8 filetype=c ts=8',
'vi: filetype=c',
'vi: sw=8 filetype=c',
'vi: sw=8 filetype=c ts=8',
'vim:filetype=c',
'vim:sw=8 filetype=c',
'vim:sw=8 filetype=c ts=8',
'vim: filetype=c',
'vim: sw=8 filetype=c',
'vim: sw=8 filetype=c ts=8',
'vim: set filetype=c:',
'vim: set sw=8 filetype=c:',
'vim: set sw=8 filetype=c ts=8:',
'vim: set filetype=c :',
'vim: set sw=8 filetype=c :',
'vim: set sw=8 filetype=c ts=8 :',
'vim: se filetype=c:',
'vim: se sw=8 filetype=c:',
'vim: se sw=8 filetype=c ts=8:',
'vim: se filetype=c :',
'vim: se sw=8 filetype=c :',
'vim: se sw=8 filetype=c ts=8 :']:
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('test.h', 'h',
['// Copyright 2014 Your Company.',
'// NOLINT(build/header_guard)',
'int64 a = (uint64) 65;',
'/* Prevent warnings about the modeline',
modeline,
'*/',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# LINT_KERNEL_FILE silences whitespace/tab warnings for entire file.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('test.h', 'h',
['// Copyright 2014 Your Company.',
'// NOLINT(build/header_guard)',
'struct test {',
'\tint member;',
'};',
'// LINT_KERNEL_FILE',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# NOLINT and NOLINTNEXTLINE silence the readability/braces warning for "};".
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('test.cc', 'cc',
['// Copyright 2014 Your Company.',
'for (int i = 0; i != 100; ++i) {',
'\tstd::cout << i << std::endl;',
'}; // NOLINT',
'for (int i = 0; i != 100; ++i) {',
'\tstd::cout << i << std::endl;',
'// NOLINTNEXTLINE',
'};',
'// LINT_KERNEL_FILE',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# Test Variable Declarations.
def testVariableDeclarations(self):
self.TestLint(
'long a = 65;',
'Use int16/int64/etc, rather than the C type long'
' [runtime/int] [4]')
self.TestLint(
'long double b = 65.0;',
'')
self.TestLint(
'long long aa = 6565;',
'Use int16/int64/etc, rather than the C type long'
' [runtime/int] [4]')
# Test C-style cast cases.
def testCStyleCast(self):
self.TestLint(
'int a = (int)1.0;',
'Using C-style cast. Use static_cast<int>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'int a = (int)-1.0;',
'Using C-style cast. Use static_cast<int>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'int *a = (int *)NULL;',
'Using C-style cast. Use reinterpret_cast<int *>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'uint16 a = (uint16)1.0;',
'Using C-style cast. Use static_cast<uint16>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'int32 a = (int32)1.0;',
'Using C-style cast. Use static_cast<int32>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'uint64 a = (uint64)1.0;',
'Using C-style cast. Use static_cast<uint64>(...) instead'
' [readability/casting] [4]')
# These shouldn't be recognized casts.
self.TestLint('u a = (u)NULL;', '')
self.TestLint('uint a = (uint)NULL;', '')
self.TestLint('typedef MockCallback<int(int)> CallbackType;', '')
self.TestLint('scoped_ptr< MockCallback<int(int)> > callback_value;', '')
self.TestLint('std::function<int(bool)>', '')
self.TestLint('x = sizeof(int)', '')
self.TestLint('x = alignof(int)', '')
self.TestLint('alignas(int) char x[42]', '')
self.TestLint('alignas(alignof(x)) char y[42]', '')
self.TestLint('void F(int (func)(int));', '')
self.TestLint('void F(int (func)(int*));', '')
self.TestLint('void F(int (Class::member)(int));', '')
self.TestLint('void F(int (Class::member)(int*));', '')
self.TestLint('void F(int (Class::member)(int), int param);', '')
self.TestLint('void F(int (Class::member)(int*), int param);', '')
# These should not be recognized (lambda functions without arg names).
self.TestLint('[](int/*unused*/) -> bool {', '')
self.TestLint('[](int /*unused*/) -> bool {', '')
self.TestLint('auto f = [](MyStruct* /*unused*/)->int {', '')
self.TestLint('[](int) -> bool {', '')
self.TestLint('auto f = [](MyStruct*)->int {', '')
# Cast with brace initializers
self.TestLint('int64_t{4096} * 1000 * 1000', '')
self.TestLint('size_t{4096} * 1000 * 1000', '')
self.TestLint('uint_fast16_t{4096} * 1000 * 1000', '')
# Brace initializer with templated type
self.TestMultiLineLint(
"""
template <typename Type1,
typename Type2>
void Function(int arg1,
int arg2) {
variable &= ~Type1{0} - 1;
}""",
'')
self.TestMultiLineLint(
"""
template <typename Type>
class Class {
void Function() {
variable &= ~Type{0} - 1;
}
};""",
'')
self.TestMultiLineLint(
"""
namespace {
template <typename Type>
class Class {
void Function() {
if (block) {
variable &= ~Type{0} - 1;
}
}
};
}""",
'')
# Test taking address of casts (runtime/casting)
def testRuntimeCasting(self):
error_msg = ('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'
' [runtime/casting] [4]')
self.TestLint('int* x = &static_cast<int*>(foo);', error_msg)
self.TestLint('int* x = &reinterpret_cast<int *>(foo);', error_msg)
self.TestLint('int* x = &(int*)foo;',
['Using C-style cast. Use reinterpret_cast<int*>(...) '
'instead [readability/casting] [4]',
error_msg])
self.TestLint('BudgetBuckets&(BudgetWinHistory::*BucketFn)(void) const;',
'')
self.TestLint('&(*func_ptr)(arg)', '')
self.TestLint('Compute(arg, &(*func_ptr)(i, j));', '')
# Alternative error message
alt_error_msg = ('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'
' [readability/casting] [4]')
self.TestLint('int* x = &down_cast<Obj*>(obj)->member_;', alt_error_msg)
self.TestLint('int* x = &down_cast<Obj*>(obj)[index];', alt_error_msg)
self.TestLint('int* x = &(down_cast<Obj*>(obj)->member_);', '')
self.TestLint('int* x = &(down_cast<Obj*>(obj)[index]);', '')
self.TestLint('int* x = &down_cast<Obj*>(obj)\n->member_;', alt_error_msg)
self.TestLint('int* x = &(down_cast<Obj*>(obj)\n->member_);', '')
# It's OK to cast an address.
self.TestLint('int* x = reinterpret_cast<int *>(&foo);', '')
# Function pointers returning references should not be confused
# with taking address of old-style casts.
self.TestLint('auto x = implicit_cast<string &(*)(int)>(&foo);', '')
def testRuntimeSelfinit(self):
self.TestLint(
'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
'You seem to be initializing a member variable with itself.'
' [runtime/init] [4]')
self.TestLint(
'Foo::Foo(Bar r, Bel l) : r_(CHECK_NOTNULL(r_)) { }',
'You seem to be initializing a member variable with itself.'
' [runtime/init] [4]')
self.TestLint(
'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
'')
self.TestLint(
'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
'')
# Test for unnamed arguments in a method.
def testCheckForUnnamedParams(self):
self.TestLint('virtual void Func(int*) const;', '')
self.TestLint('virtual void Func(int*);', '')
self.TestLint('void Method(char*) {', '')
self.TestLint('void Method(char*);', '')
self.TestLint('static void operator delete[](void*) throw();', '')
self.TestLint('int Method(int);', '')
self.TestLint('virtual void Func(int* p);', '')
self.TestLint('void operator delete(void* x) throw();', '')
self.TestLint('void Method(char* x) {', '')
self.TestLint('void Method(char* /*x*/) {', '')
self.TestLint('void Method(char* x);', '')
self.TestLint('typedef void (*Method)(int32 x);', '')
self.TestLint('static void operator delete[](void* x) throw();', '')
self.TestLint('static void operator delete[](void* /*x*/) throw();', '')
self.TestLint('X operator++(int);', '')
self.TestLint('X operator++(int) {', '')
self.TestLint('X operator--(int);', '')
self.TestLint('X operator--(int /*unused*/) {', '')
self.TestLint('MACRO(int);', '')
self.TestLint('MACRO(func(int));', '')
self.TestLint('MACRO(arg, func(int));', '')
self.TestLint('void (*func)(void*);', '')
self.TestLint('void Func((*func)(void*)) {}', '')
self.TestLint('template <void Func(void*)> void func();', '')
self.TestLint('virtual void f(int /*unused*/) {', '')
self.TestLint('void f(int /*unused*/) override {', '')
self.TestLint('void f(int /*unused*/) final {', '')
# Test deprecated casts such as int(d)
def testDeprecatedCast(self):
self.TestLint(
'int a = int(2.2);',
'Using deprecated casting style. '
'Use static_cast<int>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'(char *) "foo"',
'Using C-style cast. '
'Use const_cast<char *>(...) instead'
' [readability/casting] [4]')
self.TestLint(
'(int*)foo',
'Using C-style cast. '
'Use reinterpret_cast<int*>(...) instead'
' [readability/casting] [4]')
# Checks for false positives...
self.TestLint('int a = int();', '') # constructor
self.TestLint('X::X() : a(int()) {}', '') # default constructor
self.TestLint('operator bool();', '') # Conversion operator
self.TestLint('new int64(123);', '') # "new" operator on basic type
self.TestLint('new const int(42);', '') # "new" on const-qualified type
self.TestLint('using a = bool(int arg);', '') # C++11 alias-declaration
self.TestLint('x = bit_cast<double(*)[3]>(y);', '') # array of array
self.TestLint('void F(const char(&src)[N]);', '') # array of references
# Placement new
self.TestLint(
'new(field_ptr) int(field->default_value_enum()->number());',
'')
# C++11 function wrappers
self.TestLint('std::function<int(bool)>', '')
self.TestLint('std::function<const int(bool)>', '')
self.TestLint('std::function< int(bool) >', '')
self.TestLint('mfunction<int(bool)>', '')
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'test.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'typedef std::function<',
' bool(int)> F;',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# Return types for function pointers
self.TestLint('typedef bool(FunctionPointer)();', '')
self.TestLint('typedef bool(FunctionPointer)(int param);', '')
self.TestLint('typedef bool(MyClass::*MemberFunctionPointer)();', '')
self.TestLint('typedef bool(MyClass::* MemberFunctionPointer)();', '')
self.TestLint('typedef bool(MyClass::*MemberFunctionPointer)() const;', '')
self.TestLint('void Function(bool(FunctionPointerArg)());', '')
self.TestLint('void Function(bool(FunctionPointerArg)()) {}', '')
self.TestLint('typedef set<int64, bool(*)(int64, int64)> SortedIdSet', '')
self.TestLint(
'bool TraverseNode(T *Node, bool(VisitorBase:: *traverse) (T *t)) {}',
'')
# The second parameter to a gMock method definition is a function signature
# that often looks like a bad cast but should not be picked up by lint.
def testMockMethod(self):
self.TestLint(
'MOCK_METHOD0(method, int());',
'')
self.TestLint(
'MOCK_CONST_METHOD1(method, float(string));',
'')
self.TestLint(
'MOCK_CONST_METHOD2_T(method, double(float, float));',
'')
self.TestLint(
'MOCK_CONST_METHOD1(method, SomeType(int));',
'')
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('mock.cc', 'cc',
['MOCK_METHOD1(method1,',
' bool(int));',
'MOCK_METHOD1(',
' method2,',
' bool(int));',
'MOCK_CONST_METHOD2(',
' method3, bool(int,',
' int));',
'MOCK_METHOD1(method4, int(bool));',
'const int kConstant = int(42);'], # true positive
error_collector)
self.assertEquals(
0,
error_collector.Results().count(
('Using deprecated casting style. '
'Use static_cast<bool>(...) instead '
'[readability/casting] [4]')))
self.assertEquals(
1,
error_collector.Results().count(
('Using deprecated casting style. '
'Use static_cast<int>(...) instead '
'[readability/casting] [4]')))
# Like gMock method definitions, MockCallback instantiations look very similar
# to bad casts.
def testMockCallback(self):
self.TestLint(
'MockCallback<bool(int)>',
'')
self.TestLint(
'MockCallback<int(float, char)>',
'')
# Test false errors that happened with some include file names
def testIncludeFilenameFalseError(self):
self.TestLint(
'#include "foo/long-foo.h"',
'')
self.TestLint(
'#include "foo/sprintf.h"',
'')
# Test typedef cases. There was a bug that cpplint misidentified
# typedef for pointer to function as C-style cast and produced
# false-positive error messages.
def testTypedefForPointerToFunction(self):
self.TestLint(
'typedef void (*Func)(int x);',
'')
self.TestLint(
'typedef void (*Func)(int *x);',
'')
self.TestLint(
'typedef void Func(int x);',
'')
self.TestLint(
'typedef void Func(int *x);',
'')
def testIncludeWhatYouUseNoImplementationFiles(self):
code = 'std::vector<int> foo;'
self.assertEquals('Add #include <vector> for vector<>'
' [build/include_what_you_use] [4]',
self.PerformIncludeWhatYouUse(code, 'foo.h'))
self.assertEquals('',
self.PerformIncludeWhatYouUse(code, 'foo.cc'))
def testIncludeWhatYouUse(self):
self.TestIncludeWhatYouUse(
"""#include <vector>
std::vector<int> foo;
""",
'')
self.TestIncludeWhatYouUse(
"""#include <map>
std::pair<int,int> foo;
""",
'Add #include <utility> for pair<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <multimap>
std::pair<int,int> foo;
""",
'Add #include <utility> for pair<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <hash_map>
std::pair<int,int> foo;
""",
'Add #include <utility> for pair<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <hash_map>
auto foo = std::make_pair(1, 2);
""",
'Add #include <utility> for make_pair'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <utility>
std::pair<int,int> foo;
""",
'')
self.TestIncludeWhatYouUse(
"""#include <vector>
DECLARE_string(foobar);
""",
'')
self.TestIncludeWhatYouUse(
"""#include <vector>
DEFINE_string(foobar, "", "");
""",
'')
self.TestIncludeWhatYouUse(
"""#include <vector>
std::pair<int,int> foo;
""",
'Add #include <utility> for pair<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
std::vector<int> foo;
""",
'Add #include <vector> for vector<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <vector>
std::set<int> foo;
""",
'Add #include <set> for set<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
hash_map<int, int> foobar;
""",
'Add #include <hash_map> for hash_map<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/containers/hash_tables.h"
base::hash_map<int, int> foobar;
""",
'')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
bool foobar = std::less<int>(0,1);
""",
'Add #include <functional> for less<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
bool foobar = min<int>(0,1);
""",
'Add #include <algorithm> for min [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
'void a(const string &foobar);',
'Add #include <string> for string [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
'void a(const std::string &foobar);',
'Add #include <string> for string [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
'void a(const my::string &foobar);',
'') # Avoid false positives on strings in other namespaces.
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
bool foobar = swap(0,1);
""",
'Add #include <utility> for swap [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
""",
'Add #include <algorithm> for transform '
'[build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include "base/foobar.h"
bool foobar = min_element(a.begin(), a.end());
""",
'Add #include <algorithm> for min_element '
'[build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""foo->swap(0,1);
foo.swap(0,1);
""",
'')
self.TestIncludeWhatYouUse(
"""#include <string>
void a(const std::multimap<int,string> &foobar);
""",
'Add #include <map> for multimap<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <string>
void a(const std::unordered_map<int,string> &foobar);
""",
'Add #include <unordered_map> for unordered_map<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <string>
void a(const std::unordered_set<int> &foobar);
""",
'Add #include <unordered_set> for unordered_set<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <queue>
void a(const std::priority_queue<int> &foobar);
""",
'')
self.TestIncludeWhatYouUse(
"""#include <assert.h>
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/port.h"
vector<string> hajoa;""", '')
self.TestIncludeWhatYouUse(
"""#include <string>
int i = numeric_limits<int>::max()
""",
'Add #include <limits> for numeric_limits<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <limits>
int i = numeric_limits<int>::max()
""",
'')
self.TestIncludeWhatYouUse(
"""#include <string>
std::unique_ptr<int> x;
""",
'Add #include <memory> for unique_ptr<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <string>
auto x = std::make_unique<int>(0);
""",
'Add #include <memory> for make_unique<>'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <vector>
vector<int> foo(vector<int> x) { return std::move(x); }
""",
'Add #include <utility> for move'
' [build/include_what_you_use] [4]')
self.TestIncludeWhatYouUse(
"""#include <string>
int a, b;
std::swap(a, b);
""",
'Add #include <utility> for swap'
' [build/include_what_you_use] [4]')
# Test the UpdateIncludeState code path.
mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
message = self.PerformIncludeWhatYouUse(
'#include "blah/a.h"',
filename='blah/a.cc',
io=MockIo(mock_header_contents))
self.assertEquals(message, '')
mock_header_contents = ['#include <set>']
message = self.PerformIncludeWhatYouUse(
"""#include "blah/a.h"
std::set<int> foo;""",
filename='blah/a.cc',
io=MockIo(mock_header_contents))
self.assertEquals(message, '')
# Make sure we can find the correct header file if the cc file seems to be
# a temporary file generated by Emacs's flymake.
mock_header_contents = ['']
message = self.PerformIncludeWhatYouUse(
"""#include "blah/a.h"
std::set<int> foo;""",
filename='blah/a_flymake.cc',
io=MockIo(mock_header_contents))
self.assertEquals(message, 'Add #include <set> for set<> '
'[build/include_what_you_use] [4]')
# If there's just a cc and the header can't be found then it's ok.
message = self.PerformIncludeWhatYouUse(
"""#include "blah/a.h"
std::set<int> foo;""",
filename='blah/a.cc')
self.assertEquals(message, '')
# Make sure we find the headers with relative paths.
mock_header_contents = ['']
message = self.PerformIncludeWhatYouUse(
"""#include "%s/a.h"
std::set<int> foo;""" % os.path.basename(os.getcwd()),
filename='a.cc',
io=MockIo(mock_header_contents))
self.assertEquals(message, 'Add #include <set> for set<> '
'[build/include_what_you_use] [4]')
def testFilesBelongToSameModule(self):
f = cpplint.FilesBelongToSameModule
self.assertEquals((True, ''), f('a.cc', 'a.h'))
self.assertEquals((True, ''), f('base/google.cc', 'base/google.h'))
self.assertEquals((True, ''), f('base/google_test.cc', 'base/google.h'))
self.assertEquals((True, ''),
f('base/google_unittest.cc', 'base/google.h'))
self.assertEquals((True, ''),
f('base/internal/google_unittest.cc',
'base/public/google.h'))
self.assertEquals((True, 'xxx/yyy/'),
f('xxx/yyy/base/internal/google_unittest.cc',
'base/public/google.h'))
self.assertEquals((True, 'xxx/yyy/'),
f('xxx/yyy/base/google_unittest.cc',
'base/public/google.h'))
self.assertEquals((True, ''),
f('base/google_unittest.cc', 'base/google-inl.h'))
self.assertEquals((True, '/home/build/google3/'),
f('/home/build/google3/base/google.cc', 'base/google.h'))
self.assertEquals((False, ''),
f('/home/build/google3/base/google.cc', 'basu/google.h'))
self.assertEquals((False, ''), f('a.cc', 'b.h'))
def testCleanseLine(self):
self.assertEquals('int foo = 0;',
cpplint.CleanseComments('int foo = 0; // danger!'))
self.assertEquals('int o = 0;',
cpplint.CleanseComments('int /* foo */ o = 0;'))
self.assertEquals('foo(int a, int b);',
cpplint.CleanseComments('foo(int a /* abc */, int b);'))
self.assertEqual('f(a, b);',
cpplint.CleanseComments('f(a, /* name */ b);'))
self.assertEqual('f(a, b);',
cpplint.CleanseComments('f(a /* name */, b);'))
self.assertEqual('f(a, b);',
cpplint.CleanseComments('f(a, /* name */b);'))
self.assertEqual('f(a, b, c);',
cpplint.CleanseComments('f(a, /**/b, /**/c);'))
self.assertEqual('f(a, b, c);',
cpplint.CleanseComments('f(a, /**/b/**/, c);'))
def testRawStrings(self):
self.TestMultiLineLint(
"""
void Func() {
static const char kString[] = R"(
#endif <- invalid preprocessor should be ignored
*/ <- invalid comment should be ignored too
)";
}""",
'')
self.TestMultiLineLint(
"""
void Func() {
string s = R"TrueDelimiter(
)"
)FalseDelimiter"
)TrueDelimiter";
}""",
'')
self.TestMultiLineLint(
"""
void Func() {
char char kString[] = R"( ";" )";
}""",
'')
self.TestMultiLineLint(
"""
static const char kRawString[] = R"(
\tstatic const int kLineWithTab = 1;
static const int kLineWithTrailingWhiteSpace = 1;\x20
void WeirdNumberOfSpacesAtLineStart() {
string x;
x += StrCat("Use StrAppend instead");
}
void BlankLineAtEndOfBlock() {
// TODO incorrectly formatted
//Badly formatted comment
}
)";""",
'')
self.TestMultiLineLint(
"""
void Func() {
string s = StrCat(R"TrueDelimiter(
)"
)FalseDelimiter"
)TrueDelimiter", R"TrueDelimiter2(
)"
)FalseDelimiter2"
)TrueDelimiter2");
}""",
'')
self.TestMultiLineLint(
"""
static SomeStruct kData = {
{0, R"(line1
line2
)"}
};""",
'')
def testMultiLineComments(self):
# missing explicit is bad
self.TestMultiLineLint(
r"""int a = 0;
/* multi-liner
class Foo {
Foo(int f); // should cause a lint warning in code
}
*/ """,
'')
self.TestMultiLineLint(
r"""/* int a = 0; multi-liner
static const int b = 0;""",
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
self.TestMultiLineLint(r""" /* multi-line comment""",
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
self.TestMultiLineLint(r""" // /* comment, but not multi-line""", '')
self.TestMultiLineLint(r"""/**********
*/""", '')
self.TestMultiLineLint(r"""/**
* Doxygen comment
*/""",
'')
self.TestMultiLineLint(r"""/*!
* Doxygen comment
*/""",
'')
def testMultilineStrings(self):
multiline_string_error_message = (
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.'
' [readability/multiline_string] [5]')
file_path = 'mydir/foo.cc'
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'cc',
['const char* str = "This is a\\',
' multiline string.";'],
error_collector)
self.assertEquals(
2, # One per line.
error_collector.ResultList().count(multiline_string_error_message))
# Test non-explicit single-argument constructors
def testExplicitSingleArgumentConstructors(self):
old_verbose_level = cpplint._cpplint_state.verbose_level
cpplint._cpplint_state.verbose_level = 0
try:
# missing explicit is bad
self.TestMultiLineLint(
"""
class Foo {
Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# missing explicit is bad, even with whitespace
self.TestMultiLineLint(
"""
class Foo {
Foo (int f);
};""",
['Extra space before ( in function call [whitespace/parens] [4]',
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]'])
# missing explicit, with distracting comment, is still bad
self.TestMultiLineLint(
"""
class Foo {
Foo(int f); // simpler than Foo(blargh, blarg)
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# missing explicit, with qualified classname
self.TestMultiLineLint(
"""
class Qualifier::AnotherOne::Foo {
Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# missing explicit for inline constructors is bad as well
self.TestMultiLineLint(
"""
class Foo {
inline Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# structs are caught as well.
self.TestMultiLineLint(
"""
struct Foo {
Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# Templatized classes are caught as well.
self.TestMultiLineLint(
"""
template<typename T> class Foo {
Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# inline case for templatized classes.
self.TestMultiLineLint(
"""
template<typename T> class Foo {
inline Foo(int f);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# constructors with a default argument should still be marked explicit
self.TestMultiLineLint(
"""
class Foo {
Foo(int f = 0);
};""",
'Constructors callable with one argument should be marked explicit.'
' [runtime/explicit] [5]')
# multi-argument constructors with all but one default argument should be
# marked explicit
self.TestMultiLineLint(
"""
class Foo {
Foo(int f, int g = 0);
};""",
'Constructors callable with one argument should be marked explicit.'
' [runtime/explicit] [5]')
# multi-argument constructors with all default arguments should be marked
# explicit
self.TestMultiLineLint(
"""
class Foo {
Foo(int f = 0, int g = 0);
};""",
'Constructors callable with one argument should be marked explicit.'
' [runtime/explicit] [5]')
# explicit no-argument constructors are bad
self.TestMultiLineLint(
"""
class Foo {
explicit Foo();
};""",
'Zero-parameter constructors should not be marked explicit.'
' [runtime/explicit] [5]')
# void constructors are considered no-argument
self.TestMultiLineLint(
"""
class Foo {
explicit Foo(void);
};""",
'Zero-parameter constructors should not be marked explicit.'
' [runtime/explicit] [5]')
# No warning for multi-parameter constructors
self.TestMultiLineLint(
"""
class Foo {
explicit Foo(int f, int g);
};""",
'')
self.TestMultiLineLint(
"""
class Foo {
explicit Foo(int f, int g = 0);
};""",
'')
# single-argument constructors that take a function that takes multiple
# arguments should be explicit
self.TestMultiLineLint(
"""
class Foo {
Foo(void (*f)(int f, int g));
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# single-argument constructors that take a single template argument with
# multiple parameters should be explicit
self.TestMultiLineLint(
"""
template <typename T, typename S>
class Foo {
Foo(Bar<T, S> b);
};""",
'Single-parameter constructors should be marked explicit.'
' [runtime/explicit] [5]')
# but copy constructors that take multiple template parameters are OK
self.TestMultiLineLint(
"""
template <typename T, S>
class Foo {
Foo(Foo<T, S>& f);
};""",
'')
# proper style is okay
self.TestMultiLineLint(
"""
class Foo {
explicit Foo(int f);
};""",
'')
# two argument constructor is okay
self.TestMultiLineLint(
"""
class Foo {
Foo(int f, int b);
};""",
'')
# two argument constructor, across two lines, is okay
self.TestMultiLineLint(
"""
class Foo {
Foo(int f,
int b);
};""",
'')
# non-constructor (but similar name) is okay
self.TestMultiLineLint(
"""
class Foo {
aFoo(int f);
};""",
'')
# constructor with void argument is okay
self.TestMultiLineLint(
"""
class Foo {
Foo(void);
};""",
'')
# single argument method is okay
self.TestMultiLineLint(
"""
class Foo {
Bar(int b);
};""",
'')
# comments should be ignored
self.TestMultiLineLint(
"""
class Foo {
// Foo(int f);
};""",
'')
# single argument function following class definition is okay
# (okay, it's not actually valid, but we don't want a false positive)
self.TestMultiLineLint(
"""
class Foo {
Foo(int f, int b);
};
Foo(int f);""",
'')
# single argument function is okay
self.TestMultiLineLint(
"""static Foo(int f);""",
'')
# single argument copy constructor is okay.
self.TestMultiLineLint(
"""
class Foo {
Foo(const Foo&);
};""",
'')
self.TestMultiLineLint(
"""
class Foo {
Foo(Foo const&);
};""",
'')
self.TestMultiLineLint(
"""
class Foo {
Foo(Foo&);
};""",
'')
# templatized copy constructor is okay.
self.TestMultiLineLint(
"""
template<typename T> class Foo {
Foo(const Foo<T>&);
};""",
'')
# Special case for std::initializer_list
self.TestMultiLineLint(
"""
class Foo {
Foo(std::initializer_list<T> &arg) {}
};""",
'')
# Anything goes inside an assembly block
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['void Func() {',
' __asm__ (',
' "hlt"',
' );',
' asm {',
' movdqa [edx + 32], xmm2',
' }',
'}'],
error_collector)
self.assertEquals(
0,
error_collector.ResultList().count(
'Extra space before ( in function call [whitespace/parens] [4]'))
self.assertEquals(
0,
error_collector.ResultList().count(
'Closing ) should be moved to the previous line '
'[whitespace/parens] [2]'))
self.assertEquals(
0,
error_collector.ResultList().count(
'Extra space before [ [whitespace/braces] [5]'))
finally:
cpplint._cpplint_state.verbose_level = old_verbose_level
def testSlashStarCommentOnSingleLine(self):
self.TestMultiLineLint(
"""/* static */ Foo(int f);""",
'')
self.TestMultiLineLint(
"""/*/ static */ Foo(int f);""",
'')
self.TestMultiLineLint(
"""/*/ static Foo(int f);""",
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
self.TestMultiLineLint(
""" /*/ static Foo(int f);""",
'Could not find end of multi-line comment'
' [readability/multiline_comment] [5]')
self.TestMultiLineLint(
""" /**/ static Foo(int f);""",
'')
# Test suspicious usage of "if" like this:
# if (a == b) {
# DoSomething();
# } if (a == c) { // Should be "else if".
# DoSomething(); // This gets called twice if a == b && a == c.
# }
def testSuspiciousUsageOfIf(self):
self.TestLint(
' if (a == b) {',
'')
self.TestLint(
' } if (a == b) {',
'Did you mean "else if"? If not, start a new line for "if".'
' [readability/braces] [4]')
# Test suspicious usage of memset. Specifically, a 0
# as the final argument is almost certainly an error.
def testSuspiciousUsageOfMemset(self):
# Normal use is okay.
self.TestLint(
' memset(buf, 0, sizeof(buf))',
'')
# A 0 as the final argument is almost certainly an error.
self.TestLint(
' memset(buf, sizeof(buf), 0)',
'Did you mean "memset(buf, 0, sizeof(buf))"?'
' [runtime/memset] [4]')
self.TestLint(
' memset(buf, xsize * ysize, 0)',
'Did you mean "memset(buf, 0, xsize * ysize)"?'
' [runtime/memset] [4]')
# There is legitimate test code that uses this form.
# This is okay since the second argument is a literal.
self.TestLint(
" memset(buf, 'y', 0)",
'')
self.TestLint(
' memset(buf, 4, 0)',
'')
self.TestLint(
' memset(buf, -1, 0)',
'')
self.TestLint(
' memset(buf, 0xF1, 0)',
'')
self.TestLint(
' memset(buf, 0xcd, 0)',
'')
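# Illustrative sketch (an addition, not from the original suite): the
# same memset check can be driven through ProcessFileData, following the
# pattern used elsewhere in this file. The copyright line and trailing
# empty string keep the unrelated header/newline checks quiet; only the
# memset message is counted.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'void F() {',
' memset(buf, sizeof(buf), 0);',
'}',
''],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'Did you mean "memset(buf, 0, sizeof(buf))"?'
' [runtime/memset] [4]'))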
def testRedundantVirtual(self):
self.TestLint('virtual void F()', '')
self.TestLint('virtual void F();', '')
self.TestLint('virtual void F() {}', '')
message_template = ('"%s" is redundant since function is already '
'declared as "%s" [readability/inheritance] [4]')
for virt_specifier in ['override', 'final']:
error_message = message_template % ('virtual', virt_specifier)
self.TestLint('virtual int F() %s' % virt_specifier, error_message)
self.TestLint('virtual int F() %s;' % virt_specifier, error_message)
self.TestLint('virtual int F() %s {' % virt_specifier, error_message)
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'virtual void F(int a,',
' int b) ' + virt_specifier + ';',
'virtual void F(int a,',
' int b) LOCKS_EXCLUDED(lock) ' + virt_specifier + ';',
'virtual void F(int a,',
' int b)',
' LOCKS_EXCLUDED(lock) ' + virt_specifier + ';',
''],
error_collector)
self.assertEquals(
[error_message, error_message, error_message],
error_collector.Results())
error_message = message_template % ('override', 'final')
self.TestLint('int F() override final', error_message)
self.TestLint('int F() override final;', error_message)
self.TestLint('int F() override final {}', error_message)
self.TestLint('int F() final override', error_message)
self.TestLint('int F() final override;', error_message)
self.TestLint('int F() final override {}', error_message)
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'struct A : virtual B {',
' ~A() override;',
'};',
'class C',
' : public D,',
' public virtual E {',
' void Func() override;',
'}',
''],
error_collector)
self.assertEquals('', error_collector.Results())
self.TestLint('void Finalize(AnnotationProto *final) override;', '')
def testCheckDeprecated(self):
self.TestLanguageRulesCheck('foo_test.cc', '#include <iostream>', '')
self.TestLanguageRulesCheck('foo_unittest.cc', '#include <iostream>', '')
def testCheckPosixThreading(self):
self.TestLint('var = sctime_r()', '')
self.TestLint('var = strtok_r()', '')
self.TestLint('var = strtok_r(foo, ba, r)', '')
self.TestLint('var = brand()', '')
self.TestLint('_rand()', '')
self.TestLint('.rand()', '')
self.TestLint('->rand()', '')
self.TestLint('ACMRandom rand(seed)', '')
self.TestLint('ISAACRandom rand()', '')
self.TestLint('var = rand()',
'Consider using rand_r(...) instead of rand(...)'
' for improved thread safety.'
' [runtime/threadsafe_fn] [2]')
self.TestLint('var = strtok(str, delim)',
'Consider using strtok_r(...) '
'instead of strtok(...)'
' for improved thread safety.'
' [runtime/threadsafe_fn] [2]')
def testVlogMisuse(self):
self.TestLint('VLOG(1)', '')
self.TestLint('VLOG(99)', '')
self.TestLint('LOG(ERROR)', '')
self.TestLint('LOG(INFO)', '')
self.TestLint('LOG(WARNING)', '')
self.TestLint('LOG(FATAL)', '')
self.TestLint('LOG(DFATAL)', '')
self.TestLint('VLOG(SOMETHINGWEIRD)', '')
self.TestLint('MYOWNVLOG(ERROR)', '')
errmsg = ('VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.'
' [runtime/vlog] [5]')
self.TestLint('VLOG(ERROR)', errmsg)
self.TestLint('VLOG(INFO)', errmsg)
self.TestLint('VLOG(WARNING)', errmsg)
self.TestLint('VLOG(FATAL)', errmsg)
self.TestLint('VLOG(DFATAL)', errmsg)
self.TestLint(' VLOG(ERROR)', errmsg)
self.TestLint(' VLOG(INFO)', errmsg)
self.TestLint(' VLOG(WARNING)', errmsg)
self.TestLint(' VLOG(FATAL)', errmsg)
self.TestLint(' VLOG(DFATAL)', errmsg)
# Test potential format string bugs like printf(foo).
def testFormatStrings(self):
self.TestLint('printf("foo")', '')
self.TestLint('printf("foo: %s", foo)', '')
self.TestLint('DocidForPrintf(docid)', '') # Should not trigger.
self.TestLint('printf(format, value)', '') # Should not trigger.
self.TestLint('printf(__VA_ARGS__)', '') # Should not trigger.
self.TestLint('printf(format.c_str(), value)', '') # Should not trigger.
self.TestLint('printf(format(index).c_str(), value)', '')
self.TestLint(
'printf(foo)',
'Potential format string bug. Do printf("%s", foo) instead.'
' [runtime/printf] [4]')
self.TestLint(
'printf(foo.c_str())',
'Potential format string bug. '
'Do printf("%s", foo.c_str()) instead.'
' [runtime/printf] [4]')
self.TestLint(
'printf(foo->c_str())',
'Potential format string bug. '
'Do printf("%s", foo->c_str()) instead.'
' [runtime/printf] [4]')
self.TestLint(
'StringPrintf(foo)',
'Potential format string bug. Do StringPrintf("%s", foo) instead.'
' [runtime/printf] [4]')
# Test disallowed use of operator& and other operators.
def testIllegalOperatorOverloading(self):
errmsg = ('Unary operator& is dangerous. Do not use it.'
' [runtime/operator] [4]')
self.TestLint('void operator=(const Myclass&)', '')
self.TestLint('void operator&(int a, int b)', '') # binary operator& ok
self.TestLint('void operator&() { }', errmsg)
self.TestLint('void operator & ( ) { }',
['Extra space after ( [whitespace/parens] [2]', errmsg])
# const string reference members are dangerous.
def testConstStringReferenceMembers(self):
errmsg = ('const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.'
' [runtime/member_string_references] [2]')
members_declarations = ['const string& church',
'const string &turing',
'const string & godel']
# TODO(unknown): Enable also these tests if and when we ever
# decide to check for arbitrary member references.
# "const Turing & a",
# "const Church& a",
# "const vector<int>& a",
# "const Kurt::Godel & godel",
# "const Kazimierz::Kuratowski& kk" ]
# The Good.
self.TestLint('void f(const string&)', '')
self.TestLint('const string& f(const string& a, const string& b)', '')
self.TestLint('typedef const string& A;', '')
for decl in members_declarations:
self.TestLint(decl + ' = b;', '')
self.TestLint(decl + ' =', '')
# The Bad.
for decl in members_declarations:
self.TestLint(decl + ';', errmsg)
# Variable-length arrays are not permitted.
def testVariableLengthArrayDetection(self):
errmsg = ('Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size."
' [runtime/arrays] [1]')
self.TestLint('int a[any_old_variable];', errmsg)
self.TestLint('int doublesize[some_var * 2];', errmsg)
self.TestLint('int a[afunction()];', errmsg)
self.TestLint('int a[function(kMaxFooBars)];', errmsg)
self.TestLint('bool a_list[items_->size()];', errmsg)
self.TestLint('namespace::Type buffer[len+1];', errmsg)
self.TestLint('int a[64];', '')
self.TestLint('int a[0xFF];', '')
self.TestLint('int first[256], second[256];', '')
self.TestLint('int array_name[kCompileTimeConstant];', '')
self.TestLint('char buf[somenamespace::kBufSize];', '')
self.TestLint('int array_name[ALL_CAPS];', '')
self.TestLint('AClass array1[foo::bar::ALL_CAPS];', '')
self.TestLint('int a[kMaxStrLen + 1];', '')
self.TestLint('int a[sizeof(foo)];', '')
self.TestLint('int a[sizeof(*foo)];', '')
self.TestLint('int a[sizeof foo];', '')
self.TestLint('int a[sizeof(struct Foo)];', '')
self.TestLint('int a[128 - sizeof(const bar)];', '')
self.TestLint('int a[(sizeof(foo) * 4)];', '')
self.TestLint('int a[(arraysize(fixed_size_array)/2) << 1];', '')
self.TestLint('delete a[some_var];', '')
self.TestLint('return a[some_var];', '')
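# Extra sketch case (not in the original suite): a 'k'-prefixed
# CamelCase constant is the recommended fix and should lint clean, just
# like the kCompileTimeConstant case above. kMaxItems is a hypothetical
# name.
self.TestLint('int items[kMaxItems];', '')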
# DISALLOW_COPY_AND_ASSIGN and DISALLOW_IMPLICIT_CONSTRUCTORS should be at
# end of class if present.
def testDisallowMacrosAtEnd(self):
for macro_name in (
'DISALLOW_COPY_AND_ASSIGN',
'DISALLOW_IMPLICIT_CONSTRUCTORS'):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'class SomeClass {',
' private:',
' %s(SomeClass);' % macro_name,
' int member_;',
'};',
''],
error_collector)
self.assertEquals(
('%s should be the last thing in the class' % macro_name) +
' [readability/constructors] [3]',
error_collector.Results())
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'class OuterClass {',
' private:',
' struct InnerClass {',
' private:',
' %s(InnerClass);' % macro_name,
' int member;',
' };',
'};',
''],
error_collector)
self.assertEquals(
('%s should be the last thing in the class' % macro_name) +
' [readability/constructors] [3]',
error_collector.Results())
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'class OuterClass1 {',
' private:',
' struct InnerClass1 {',
' private:',
' %s(InnerClass1);' % macro_name,
' };',
' %s(OuterClass1);' % macro_name,
'};',
'struct OuterClass2 {',
' private:',
' class InnerClass2 {',
' private:',
' %s(InnerClass2);' % macro_name,
' // comment',
' };',
'',
' %s(OuterClass2);' % macro_name,
'',
' // comment',
'};',
'void Func() {',
' struct LocalClass {',
' private:',
' %s(LocalClass);' % macro_name,
' } variable;',
'}',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# DISALLOW* macros should be in the private: section.
def testMisplacedDisallowMacros(self):
for macro_name in (
'DISALLOW_COPY_AND_ASSIGN',
'DISALLOW_IMPLICIT_CONSTRUCTORS'):
self.TestMultiLineLint(
"""
class A {
public:
%s(A);
};""" % macro_name,
('%s must be in the private: section' % macro_name) +
' [readability/constructors] [3]')
self.TestMultiLineLint(
"""
struct B {
%s(B);
};""" % macro_name,
('%s must be in the private: section' % macro_name) +
' [readability/constructors] [3]')
self.TestMultiLineLint(
"""
class Outer1 {
private:
struct Inner1 {
%s(Inner1);
};
%s(Outer1);
};""" % (macro_name, macro_name),
('%s must be in the private: section' % macro_name) +
' [readability/constructors] [3]')
self.TestMultiLineLint(
"""
class Outer2 {
private:
class Inner2 {
%s(Inner2);
};
%s(Outer2);
};""" % (macro_name, macro_name),
'')
# Extra checks to make sure that nested classes are handled
# correctly. Use different macros for inner and outer classes so
# that we can tell the error messages apart.
self.TestMultiLineLint(
"""
class Outer3 {
struct Inner3 {
DISALLOW_COPY_AND_ASSIGN(Inner3);
};
DISALLOW_IMPLICIT_CONSTRUCTORS(Outer3);
};""",
('DISALLOW_COPY_AND_ASSIGN must be in the private: section'
' [readability/constructors] [3]'))
self.TestMultiLineLint(
"""
struct Outer4 {
class Inner4 {
DISALLOW_COPY_AND_ASSIGN(Inner4);
};
DISALLOW_IMPLICIT_CONSTRUCTORS(Outer4);
};""",
('DISALLOW_IMPLICIT_CONSTRUCTORS must be in the private: section'
' [readability/constructors] [3]'))
# Brace usage
def testBraces(self):
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array
self.TestLint('int a[3] = { 1, 2, 3 };', '')
self.TestLint(
"""const int foo[] =
{1, 2, 3 };""",
'')
# On a single line, an unmatched '}' with a ';' is ignored (not enough context)
self.TestMultiLineLint(
"""int a[3] = { 1,
2,
3 };""",
'')
self.TestMultiLineLint(
"""int a[2][3] = { { 1, 2 },
{ 3, 4 } };""",
'')
self.TestMultiLineLint(
"""int a[2][3] =
{ { 1, 2 },
{ 3, 4 } };""",
'')
# CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
def testCheckCheck(self):
self.TestLint('CHECK(x == 42);',
'Consider using CHECK_EQ instead of CHECK(a == b)'
' [readability/check] [2]')
self.TestLint('CHECK(x != 42);',
'Consider using CHECK_NE instead of CHECK(a != b)'
' [readability/check] [2]')
self.TestLint('CHECK(x >= 42);',
'Consider using CHECK_GE instead of CHECK(a >= b)'
' [readability/check] [2]')
self.TestLint('CHECK(x > 42);',
'Consider using CHECK_GT instead of CHECK(a > b)'
' [readability/check] [2]')
self.TestLint('CHECK(x <= 42);',
'Consider using CHECK_LE instead of CHECK(a <= b)'
' [readability/check] [2]')
self.TestLint('CHECK(x < 42);',
'Consider using CHECK_LT instead of CHECK(a < b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x == 42);',
'Consider using DCHECK_EQ instead of DCHECK(a == b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x != 42);',
'Consider using DCHECK_NE instead of DCHECK(a != b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x >= 42);',
'Consider using DCHECK_GE instead of DCHECK(a >= b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x > 42);',
'Consider using DCHECK_GT instead of DCHECK(a > b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x <= 42);',
'Consider using DCHECK_LE instead of DCHECK(a <= b)'
' [readability/check] [2]')
self.TestLint('DCHECK(x < 42);',
'Consider using DCHECK_LT instead of DCHECK(a < b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_TRUE("42" == x);',
'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_TRUE("42" != x);',
'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_TRUE(+42 >= x);',
'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_FALSE(x == 42);',
'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_FALSE(x != 42);',
'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_FALSE(x >= 42);',
'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
' [readability/check] [2]')
self.TestLint(
'ASSERT_FALSE(x > 42);',
'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
' [readability/check] [2]')
self.TestLint(
'ASSERT_FALSE(x <= 42);',
'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
' [readability/check] [2]')
self.TestLint('CHECK(x<42);',
['Missing spaces around <'
' [whitespace/operators] [3]',
'Consider using CHECK_LT instead of CHECK(a < b)'
' [readability/check] [2]'])
self.TestLint('CHECK(x>42);',
['Missing spaces around >'
' [whitespace/operators] [3]',
'Consider using CHECK_GT instead of CHECK(a > b)'
' [readability/check] [2]'])
self.TestLint('using some::namespace::operator<<;', '')
self.TestLint('using some::namespace::operator>>;', '')
self.TestLint('CHECK(x->y == 42);',
'Consider using CHECK_EQ instead of CHECK(a == b)'
' [readability/check] [2]')
self.TestLint(
'  EXPECT_TRUE(42 < x);  // Random comment.',
'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
' [readability/check] [2]')
self.TestLint(
'EXPECT_TRUE( 42 < x );',
['Extra space after ( in function call'
' [whitespace/parens] [4]',
'Extra space before ) [whitespace/parens] [2]',
'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
' [readability/check] [2]'])
self.TestLint('CHECK(4\'2 == x);',
'Consider using CHECK_EQ instead of CHECK(a == b)'
' [readability/check] [2]')
def testCheckCheckFalsePositives(self):
self.TestLint('CHECK(some_iterator == obj.end());', '')
self.TestLint('EXPECT_TRUE(some_iterator == obj.end());', '')
self.TestLint('EXPECT_FALSE(some_iterator == obj.end());', '')
self.TestLint('CHECK(some_pointer != NULL);', '')
self.TestLint('EXPECT_TRUE(some_pointer != NULL);', '')
self.TestLint('EXPECT_FALSE(some_pointer != NULL);', '')
self.TestLint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
self.TestLint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')
self.TestLint('CHECK(x ^ (y < 42));', '')
self.TestLint('CHECK((x > 42) ^ (x < 54));', '')
self.TestLint('CHECK(a && b < 42);', '')
self.TestLint('CHECK(42 < a && a < b);', '')
self.TestLint('SOFT_CHECK(x > 42);', '')
self.TestMultiLineLint(
"""_STLP_DEFINE_BINARY_OP_CHECK(==, _OP_EQUAL);
_STLP_DEFINE_BINARY_OP_CHECK(!=, _OP_NOT_EQUAL);
_STLP_DEFINE_BINARY_OP_CHECK(<, _OP_LESS_THAN);
_STLP_DEFINE_BINARY_OP_CHECK(<=, _OP_LESS_EQUAL);
_STLP_DEFINE_BINARY_OP_CHECK(>, _OP_GREATER_THAN);
_STLP_DEFINE_BINARY_OP_CHECK(>=, _OP_GREATER_EQUAL);
_STLP_DEFINE_BINARY_OP_CHECK(+, _OP_PLUS);
_STLP_DEFINE_BINARY_OP_CHECK(*, _OP_TIMES);
_STLP_DEFINE_BINARY_OP_CHECK(/, _OP_DIVIDE);
_STLP_DEFINE_BINARY_OP_CHECK(-, _OP_SUBTRACT);
_STLP_DEFINE_BINARY_OP_CHECK(%, _OP_MOD);""",
'')
self.TestLint('CHECK(x < 42) << "Custom error message";', '')
# Alternative tokens should be replaced by their punctuation-operator equivalents
def testCheckAltTokens(self):
self.TestLint('true or true',
'Use operator || instead of or'
' [readability/alt_tokens] [2]')
self.TestLint('true and true',
'Use operator && instead of and'
' [readability/alt_tokens] [2]')
self.TestLint('if (not true)',
'Use operator ! instead of not'
' [readability/alt_tokens] [2]')
self.TestLint('1 bitor 1',
'Use operator | instead of bitor'
' [readability/alt_tokens] [2]')
self.TestLint('1 xor 1',
'Use operator ^ instead of xor'
' [readability/alt_tokens] [2]')
self.TestLint('1 bitand 1',
'Use operator & instead of bitand'
' [readability/alt_tokens] [2]')
self.TestLint('x = compl 1',
'Use operator ~ instead of compl'
' [readability/alt_tokens] [2]')
self.TestLint('x and_eq y',
'Use operator &= instead of and_eq'
' [readability/alt_tokens] [2]')
self.TestLint('x or_eq y',
'Use operator |= instead of or_eq'
' [readability/alt_tokens] [2]')
self.TestLint('x xor_eq y',
'Use operator ^= instead of xor_eq'
' [readability/alt_tokens] [2]')
self.TestLint('x not_eq y',
'Use operator != instead of not_eq'
' [readability/alt_tokens] [2]')
self.TestLint('line_continuation or',
'Use operator || instead of or'
' [readability/alt_tokens] [2]')
self.TestLint('if(true and(parentheses',
'Use operator && instead of and'
' [readability/alt_tokens] [2]')
self.TestLint('#include "base/false-and-false.h"', '')
self.TestLint('#error false or false', '')
self.TestLint('false nor false', '')
self.TestLint('false nand false', '')
# Passing and returning non-const references
def testNonConstReference(self):
# Passing a non-const reference as function parameter is forbidden.
operand_error_message = ('Is this a non-const reference? '
'If so, make const or use a pointer: %s'
' [runtime/references] [2]')
# Warn of use of a non-const reference in operators and functions
self.TestLint('bool operator>(Foo& s, Foo& f);',
[operand_error_message % 'Foo& s',
operand_error_message % 'Foo& f'])
self.TestLint('bool operator+(Foo& s, Foo& f);',
[operand_error_message % 'Foo& s',
operand_error_message % 'Foo& f'])
self.TestLint('int len(Foo& s);', operand_error_message % 'Foo& s')
# Allow use of non-const references in a few specific cases
self.TestLint('stream& operator>>(stream& s, Foo& f);', '')
self.TestLint('stream& operator<<(stream& s, Foo& f);', '')
self.TestLint('void swap(Bar& a, Bar& b);', '')
self.TestLint('ostream& LogFunc(ostream& s);', '')
self.TestLint('ostringstream& LogFunc(ostringstream& s);', '')
self.TestLint('istream& LogFunc(istream& s);', '')
self.TestLint('istringstream& LogFunc(istringstream& s);', '')
# Returning a non-const reference from a function is OK.
self.TestLint('int& g();', '')
# Passing a const reference to a struct (using the struct keyword) is OK.
self.TestLint('void foo(const struct tm& tm);', '')
# Passing a const reference to a typename is OK.
self.TestLint('void foo(const typename tm& tm);', '')
# Const reference to a pointer type is OK.
self.TestLint('void foo(const Bar* const& p) {', '')
self.TestLint('void foo(Bar const* const& p) {', '')
self.TestLint('void foo(Bar* const& p) {', '')
# Const reference to a templated type is OK.
self.TestLint('void foo(const std::vector<std::string>& v);', '')
# Non-const reference to a pointer type is not OK.
self.TestLint('void foo(Bar*& p);',
operand_error_message % 'Bar*& p')
self.TestLint('void foo(const Bar*& p);',
operand_error_message % 'const Bar*& p')
self.TestLint('void foo(Bar const*& p);',
operand_error_message % 'Bar const*& p')
self.TestLint('void foo(struct Bar*& p);',
operand_error_message % 'struct Bar*& p')
self.TestLint('void foo(const struct Bar*& p);',
operand_error_message % 'const struct Bar*& p')
self.TestLint('void foo(struct Bar const*& p);',
operand_error_message % 'struct Bar const*& p')
# Non-const reference to a templated type is not OK.
self.TestLint('void foo(std::vector<int>& p);',
operand_error_message % 'std::vector<int>& p')
# Returning an address of something is not prohibited.
self.TestLint('return &something;', '')
self.TestLint('if (condition) {return &something; }', '')
self.TestLint('if (condition) return &something;', '')
self.TestLint('if (condition) address = &something;', '')
self.TestLint('if (condition) result = lhs&rhs;', '')
self.TestLint('if (condition) result = lhs & rhs;', '')
self.TestLint('a = (b+c) * sizeof &f;', '')
self.TestLint('a = MySize(b) * sizeof &f;', '')
# We don't get confused by C++11 range-based for loops.
self.TestLint('for (const string& s : c)', '')
self.TestLint('for (auto& r : c)', '')
self.TestLint('for (typename Type& a : b)', '')
# We don't get confused by some other uses of '&'.
self.TestLint('T& operator=(const T& t);', '')
self.TestLint('int g() { return (a & b); }', '')
self.TestLint('T& r = (T&)*(vp());', '')
self.TestLint('T& r = v', '')
self.TestLint('static_assert((kBits & kMask) == 0, "text");', '')
self.TestLint('COMPILE_ASSERT((kBits & kMask) == 0, text);', '')
# Spaces before template arguments. This is poor style, but
# happens 0.15% of the time.
self.TestLint('void Func(const vector <int> &const_x, '
'vector <int> &nonconst_x) {',
operand_error_message % 'vector<int> &nonconst_x')
# Member functions declared 'override' are exempt from the check
self.TestLint('void Func(X& x);', operand_error_message % 'X& x')
self.TestLint('void Func(X& x) {}', operand_error_message % 'X& x')
self.TestLint('void Func(X& x) override;', '')
self.TestLint('void Func(X& x) override {', '')
self.TestLint('void Func(X& x) const override;', '')
self.TestLint('void Func(X& x) const override {', '')
# Don't warn on out-of-line method definitions.
self.TestLint('void NS::Func(X& x) {', '')
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'void a::b() {}',
'void f(int& q) {}',
''],
error_collector)
self.assertEquals(
operand_error_message % 'int& q',
error_collector.Results())
# Other potential false positives. These need full parser
# state to reproduce as opposed to just TestLint.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'void swap(int &x,',
' int &y) {',
'}',
'void swap(',
' sparsegroup<T, GROUP_SIZE, Alloc> &x,',
' sparsegroup<T, GROUP_SIZE, Alloc> &y) {',
'}',
'ostream& operator<<(',
' ostream& out',
' const dense_hash_set<Value, Hash, Equals, Alloc>& seq) {',
'}',
'class A {',
' void Function(',
' string &x) override {',
' }',
'};',
'void Derived::Function(',
' string &x) {',
'}',
'#define UNSUPPORTED_MASK(_mask) \\',
' if (flags & _mask) { \\',
' LOG(FATAL) << "Unsupported flag: " << #_mask; \\',
' }',
'Constructor::Constructor()',
' : initializer1_(a1 & b1),',
' initializer2_(a2 & b2) {',
'}',
'Constructor::Constructor()',
' : initializer1_{a3 & b3},',
' initializer2_(a4 & b4) {',
'}',
'Constructor::Constructor()',
' : initializer1_{a5 & b5},',
' initializer2_(a6 & b6) {}',
''],
error_collector)
self.assertEquals('', error_collector.Results())
# Multi-line references
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'void Func(const Outer::',
' Inner& const_x,',
' const Outer',
' ::Inner& const_y,',
' const Outer<',
' int>::Inner& const_z,',
' Outer::',
' Inner& nonconst_x,',
' Outer',
' ::Inner& nonconst_y,',
' Outer<',
' int>::Inner& nonconst_z) {',
'}',
''],
error_collector)
self.assertEquals(
[operand_error_message % 'Outer::Inner& nonconst_x',
operand_error_message % 'Outer::Inner& nonconst_y',
operand_error_message % 'Outer<int>::Inner& nonconst_z'],
error_collector.Results())
# A peculiar false positive due to bad template argument parsing
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'inline RCULocked<X>::ReadPtr::ReadPtr(const RCULocked* rcu) {',
' DCHECK(!(data & kFlagMask)) << "Error";',
'}',
'',
'RCULocked<X>::WritePtr::WritePtr(RCULocked* rcu)',
' : lock_(&rcu_->mutex_) {',
'}',
''],
error_collector)
self.assertEquals('', error_collector.Results())
def testBraceAtBeginOfLine(self):
self.TestLint('{',
'{ should almost always be at the end of the previous line'
' [whitespace/braces] [4]')
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['int function()',
'{', # warning here
' MutexLock l(&mu);',
'}',
'int variable;'
'{', # no warning
' MutexLock l(&mu);',
'}',
'MyType m = {',
' {value1, value2},',
' {', # no warning
' loooong_value1, looooong_value2',
' }',
'};',
'#if PREPROCESSOR',
'{', # no warning
' MutexLock l(&mu);',
'}',
'#endif'],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'{ should almost always be at the end of the previous line'
' [whitespace/braces] [4]'))
self.TestMultiLineLint(
"""
foo(
{
loooooooooooooooong_value,
});""",
'')
def testMismatchingSpacesInParens(self):
self.TestLint('if (foo ) {', 'Mismatching spaces inside () in if'
' [whitespace/parens] [5]')
self.TestLint('switch ( foo) {', 'Mismatching spaces inside () in switch'
' [whitespace/parens] [5]')
self.TestLint('for (foo; ba; bar ) {', 'Mismatching spaces inside () in for'
' [whitespace/parens] [5]')
self.TestLint('for (; foo; bar) {', '')
self.TestLint('for ( ; foo; bar) {', '')
self.TestLint('for ( ; foo; bar ) {', '')
self.TestLint('for (foo; bar; ) {', '')
self.TestLint('while ( foo ) {', 'Should have zero or one spaces inside'
' ( and ) in while [whitespace/parens] [5]')
def testSpacingForFncall(self):
self.TestLint('if (foo) {', '')
self.TestLint('for (foo; bar; baz) {', '')
self.TestLint('for (;;) {', '')
# Space should be allowed in placement new operators.
self.TestLint('Something* p = new (place) Something();', '')
# Test that there is no warning when increment statement is empty.
self.TestLint('for (foo; baz;) {', '')
self.TestLint('for (foo;bar;baz) {', 'Missing space after ;'
' [whitespace/semicolon] [3]')
# we don't warn about this semicolon, at least for now
self.TestLint('if (condition) {return &something; }',
'')
# seen in some macros
self.TestLint('DoSth();\\', '')
# Test that there is no warning about semicolon here.
self.TestLint('abc;// this is abc',
'At least two spaces is best between code'
' and comments [whitespace/comments] [2]')
self.TestLint('while (foo) {', '')
self.TestLint('switch (foo) {', '')
self.TestLint('foo( bar)', 'Extra space after ( in function call'
' [whitespace/parens] [4]')
self.TestLint('foo(  // comment', '')
self.TestLint('foo( // comment',
'At least two spaces is best between code'
' and comments [whitespace/comments] [2]')
self.TestLint('foobar( \\', '')
self.TestLint('foobar(     \\', '')
self.TestLint('( a + b)', 'Extra space after ('
' [whitespace/parens] [2]')
self.TestLint('((a+b))', '')
self.TestLint('foo (foo)', 'Extra space before ( in function call'
' [whitespace/parens] [4]')
# asm volatile () may have a space, as it isn't a function call.
self.TestLint('asm volatile ("")', '')
self.TestLint('__asm__ __volatile__ ("")', '')
self.TestLint('} catch (const Foo& ex) {', '')
self.TestLint('case (42):', '')
self.TestLint('typedef foo (*foo)(foo)', '')
self.TestLint('typedef foo (*foo12bar_)(foo)', '')
self.TestLint('typedef foo (Foo::*bar)(foo)', '')
self.TestLint('using foo = type (Foo::*bar)(foo)', '')
self.TestLint('using foo = type (Foo::*bar)(', '')
self.TestLint('using foo = type (Foo::*)(', '')
self.TestLint('foo (Foo::*bar)(', '')
self.TestLint('foo (x::y::*z)(', '')
self.TestLint('foo (Foo::bar)(',
'Extra space before ( in function call'
' [whitespace/parens] [4]')
self.TestLint('foo (*bar)(', '')
self.TestLint('typedef foo (Foo::*bar)(', '')
self.TestLint('(foo)(bar)', '')
self.TestLint('Foo (*foo)(bar)', '')
self.TestLint('Foo (*foo)(Bar bar,', '')
self.TestLint('char (*p)[sizeof(foo)] = &foo', '')
self.TestLint('char (&ref)[sizeof(foo)] = &foo', '')
self.TestLint('const char32 (*table[])[6];', '')
# The sizeof operator is often written as if it were a function call, with
# an opening parenthesis directly following the operator name, but it can
# also be written like any other operator, with a space following the
# operator name, and the argument optionally in parentheses.
self.TestLint('sizeof(foo)', '')
self.TestLint('sizeof foo', '')
self.TestLint('sizeof (foo)', '')
def testSpacingBeforeBraces(self):
self.TestLint('if (foo){', 'Missing space before {'
' [whitespace/braces] [5]')
self.TestLint('for{', 'Missing space before {'
' [whitespace/braces] [5]')
self.TestLint('for {', '')
self.TestLint('EXPECT_DEBUG_DEATH({', '')
self.TestLint('std::is_convertible<A, B>{}', '')
self.TestLint('blah{32}', 'Missing space before {'
' [whitespace/braces] [5]')
self.TestLint('int8_t{3}', '')
self.TestLint('int16_t{3}', '')
self.TestLint('int32_t{3}', '')
self.TestLint('uint64_t{12345}', '')
self.TestLint('constexpr int64_t kBatchGapMicros ='
' int64_t{7} * 24 * 3600 * 1000000; // 1 wk.', '')
self.TestLint('MoveOnly(int i1, int i2) : ip1{new int{i1}}, '
'ip2{new int{i2}} {}',
'')
def testSemiColonAfterBraces(self):
self.TestLint('if (cond) { func(); };',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestLint('void Func() {};',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestLint('void Func() const {};',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestLint('class X {};', '')
for keyword in ['struct', 'union']:
for align in ['', ' alignas(16)']:
for typename in ['', ' X']:
for identifier in ['', ' x']:
self.TestLint(keyword + align + typename + ' {}' + identifier + ';',
'')
self.TestLint('class X : public Y {};', '')
self.TestLint('class X : public MACRO() {};', '')
self.TestLint('class X : public decltype(expr) {};', '')
self.TestLint('DEFINE_FACADE(PCQueue::Watcher, PCQueue) {};', '')
self.TestLint('VCLASS(XfaTest, XfaContextTest) {};', '')
self.TestLint('class STUBBY_CLASS(H, E) {};', '')
self.TestLint('class STUBBY2_CLASS(H, E) {};', '')
self.TestLint('TEST(TestCase, TestName) {};',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestLint('TEST_F(TestCase, TestName) {};',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestLint('file_tocs_[i] = (FileToc) {a, b, c};', '')
self.TestMultiLineLint('class X : public Y,\npublic Z {};', '')
def testLambda(self):
self.TestLint('auto x = []() {};', '')
self.TestLint('return []() {};', '')
self.TestMultiLineLint('auto x = []() {\n};\n', '')
self.TestLint('int operator[](int x) {};',
'You don\'t need a ; after a } [readability/braces] [4]')
self.TestMultiLineLint('auto x = [&a,\nb]() {};', '')
self.TestMultiLineLint('auto x = [&a,\nb]\n() {};', '')
self.TestMultiLineLint('auto x = [&a,\n'
' b](\n'
' int a,\n'
' int b) {\n'
' return a +\n'
' b;\n'
'};\n',
'')
# Avoid false positives with operator[]
self.TestLint('table_to_children[&*table].push_back(dependent);', '')
def testBraceInitializerList(self):
self.TestLint('MyStruct p = {1, 2};', '')
self.TestLint('MyStruct p{1, 2};', '')
self.TestLint('vector<int> p = {1, 2};', '')
self.TestLint('vector<int> p{1, 2};', '')
self.TestLint('x = vector<int>{1, 2};', '')
self.TestLint('x = (struct in_addr){ 0 };', '')
self.TestLint('Func(vector<int>{1, 2})', '')
self.TestLint('Func((struct in_addr){ 0 })', '')
self.TestLint('Func(vector<int>{1, 2}, 3)', '')
self.TestLint('Func((struct in_addr){ 0 }, 3)', '')
self.TestLint('LOG(INFO) << char{7};', '')
self.TestLint('LOG(INFO) << char{7} << "!";', '')
self.TestLint('int p[2] = {1, 2};', '')
self.TestLint('return {1, 2};', '')
self.TestLint('std::unique_ptr<Foo> foo{new Foo{}};', '')
self.TestLint('auto foo = std::unique_ptr<Foo>{new Foo{}};', '')
self.TestLint('static_assert(Max7String{}.IsValid(), "");', '')
self.TestLint('map_of_pairs[{1, 2}] = 3;', '')
self.TestLint('ItemView{has_offer() ? new Offer{offer()} : nullptr', '')
self.TestLint('template <class T, EnableIf<::std::is_const<T>{}> = 0>', '')
self.TestMultiLineLint('std::unique_ptr<Foo> foo{\n'
' new Foo{}\n'
'};\n', '')
self.TestMultiLineLint('std::unique_ptr<Foo> foo{\n'
' new Foo{\n'
' new Bar{}\n'
' }\n'
'};\n', '')
self.TestMultiLineLint('if (true) {\n'
' if (false){ func(); }\n'
'}\n',
'Missing space before { [whitespace/braces] [5]')
self.TestMultiLineLint('MyClass::MyClass()\n'
' : initializer_{\n'
' Func()} {\n'
'}\n', '')
self.TestLint('const pair<string, string> kCL' +
('o' * 41) + 'gStr[] = {\n',
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
self.TestMultiLineLint('const pair<string, string> kCL' +
('o' * 40) + 'ngStr[] =\n'
' {\n'
' {"gooooo", "oooogle"},\n'
'};\n', '')
self.TestMultiLineLint('const pair<string, string> kCL' +
('o' * 39) + 'ngStr[] =\n'
' {\n'
' {"gooooo", "oooogle"},\n'
'};\n', '{ should almost always be at the end of '
'the previous line [whitespace/braces] [4]')
def testSpacingAroundElse(self):
self.TestLint('}else {', 'Missing space before else'
' [whitespace/braces] [5]')
self.TestLint('} else{', 'Missing space before {'
' [whitespace/braces] [5]')
self.TestLint('} else {', '')
self.TestLint('} else if (foo) {', '')
def testSpacingWithInitializerLists(self):
self.TestLint('int v[1][3] = {{1, 2, 3}};', '')
self.TestLint('int v[1][1] = {{0}};', '')
def testSpacingForBinaryOps(self):
self.TestLint('if (foo||bar) {', 'Missing spaces around ||'
' [whitespace/operators] [3]')
self.TestLint('if (foo<=bar) {', 'Missing spaces around <='
' [whitespace/operators] [3]')
self.TestLint('if (foo<bar) {', 'Missing spaces around <'
' [whitespace/operators] [3]')
self.TestLint('if (foo>bar) {', 'Missing spaces around >'
' [whitespace/operators] [3]')
self.TestLint('if (foo<bar->baz) {', 'Missing spaces around <'
' [whitespace/operators] [3]')
self.TestLint('if (foo<bar->bar) {', 'Missing spaces around <'
' [whitespace/operators] [3]')
self.TestLint('template<typename T = double>', '')
self.TestLint('std::unique_ptr<No<Spaces>>', '')
self.TestLint('typedef hash_map<Foo, Bar>', '')
self.TestLint('10<<20', '')
self.TestLint('10<<a',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('a<<20',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('a<<b',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('10LL<<20', '')
self.TestLint('10ULL<<20', '')
self.TestLint('a>>b',
'Missing spaces around >> [whitespace/operators] [3]')
self.TestLint('10>>b',
'Missing spaces around >> [whitespace/operators] [3]')
self.TestLint('LOG(ERROR)<<*foo',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('LOG(ERROR)<<&foo',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('StringCoder<vector<string>>::ToString()', '')
self.TestLint('map<pair<int, int>, map<int, int>>::iterator', '')
self.TestLint('func<int, pair<int, pair<int, int>>>()', '')
self.TestLint('MACRO1(list<list<int>>)', '')
self.TestLint('MACRO2(list<list<int>>, 42)', '')
self.TestLint('void DoFoo(const set<vector<string>>& arg1);', '')
self.TestLint('void SetFoo(set<vector<string>>* arg1);', '')
self.TestLint('foo = new set<vector<string>>;', '')
self.TestLint('reinterpret_cast<set<vector<string>>*>(a);', '')
self.TestLint('MACRO(<<)', '')
self.TestLint('MACRO(<<, arg)', '')
self.TestLint('MACRO(<<=)', '')
self.TestLint('MACRO(<<=, arg)', '')
self.TestLint('using Vector3<T>::operator==;', '')
self.TestLint('using Vector3<T>::operator!=;', '')
def testSpacingBeforeLastSemicolon(self):
self.TestLint('call_function() ;',
'Extra space before last semicolon. If this should be an '
'empty statement, use {} instead.'
' [whitespace/semicolon] [5]')
self.TestLint('while (true) ;',
'Extra space before last semicolon. If this should be an '
'empty statement, use {} instead.'
' [whitespace/semicolon] [5]')
self.TestLint('default:;',
'Semicolon defining empty statement. Use {} instead.'
' [whitespace/semicolon] [5]')
self.TestLint(' ;',
'Line contains only semicolon. If this should be an empty '
'statement, use {} instead.'
' [whitespace/semicolon] [5]')
self.TestLint('for (int i = 0; ;', '')
def testEmptyBlockBody(self):
self.TestLint('while (true);',
'Empty loop bodies should use {} or continue'
' [whitespace/empty_loop_body] [5]')
self.TestLint('if (true);',
'Empty conditional bodies should use {}'
' [whitespace/empty_conditional_body] [5]')
self.TestLint('while (true)', '')
self.TestLint('while (true) continue;', '')
self.TestLint('for (;;);',
'Empty loop bodies should use {} or continue'
' [whitespace/empty_loop_body] [5]')
self.TestLint('for (;;)', '')
self.TestLint('for (;;) continue;', '')
self.TestLint('for (;;) func();', '')
self.TestLint('if (test) {}',
'If statement had no body and no else clause'
' [whitespace/empty_if_body] [4]')
self.TestLint('if (test) func();', '')
self.TestLint('if (test) {} else {}', '')
self.TestMultiLineLint("""while (true &&
false);""",
'Empty loop bodies should use {} or continue'
' [whitespace/empty_loop_body] [5]')
self.TestMultiLineLint("""do {
} while (false);""",
'')
self.TestMultiLineLint("""#define MACRO \\
do { \\
} while (false);""",
'')
self.TestMultiLineLint("""do {
} while (false); // next line gets a warning
while (false);""",
'Empty loop bodies should use {} or continue'
' [whitespace/empty_loop_body] [5]')
self.TestMultiLineLint("""if (test) {
}""",
'If statement had no body and no else clause'
' [whitespace/empty_if_body] [4]')
self.TestMultiLineLint("""if (test,
func({})) {
}""",
'If statement had no body and no else clause'
' [whitespace/empty_if_body] [4]')
self.TestMultiLineLint("""if (test)
func();""", '')
self.TestLint('if (test) { hello; }', '')
self.TestLint('if (test({})) { hello; }', '')
self.TestMultiLineLint("""if (test) {
func();
}""", '')
self.TestMultiLineLint("""if (test) {
// multiline
// comment
}""", '')
self.TestMultiLineLint("""if (test) { // comment
}""", '')
self.TestMultiLineLint("""if (test) {
} else {
}""", '')
self.TestMultiLineLint("""if (func(p1,
p2,
p3)) {
func();
}""", '')
self.TestMultiLineLint("""if (func({}, p1)) {
func();
}""", '')
def testSpacingForRangeBasedFor(self):
# Basic correctly formatted case:
self.TestLint('for (int i : numbers) {', '')
# Missing space before colon:
self.TestLint('for (int i: numbers) {',
'Missing space around colon in range-based for loop'
' [whitespace/forcolon] [2]')
# Missing space after colon:
self.TestLint('for (int i :numbers) {',
'Missing space around colon in range-based for loop'
' [whitespace/forcolon] [2]')
# Missing spaces both before and after the colon.
self.TestLint('for (int i:numbers) {',
'Missing space around colon in range-based for loop'
' [whitespace/forcolon] [2]')
# The scope operator '::' shouldn't cause warnings...
self.TestLint('for (std::size_t i : sizes) {}', '')
# ...but it shouldn't suppress them either.
self.TestLint('for (std::size_t i: sizes) {}',
'Missing space around colon in range-based for loop'
' [whitespace/forcolon] [2]')
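# Extra sketch case (an addition): auto with a reference binds the same
# way, and the spaced colon should stay clean (compare the
# 'for (auto& r : c)' case in testNonConstReference above).
self.TestLint('for (auto& x : v) {}', '')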
# Static or global STL strings.
def testStaticOrGlobalSTLStrings(self):
# A template for the error message for a const global/static string.
error_msg = ('For a static/global string constant, use a C style '
'string instead: "%s[]". [runtime/string] [4]')
# The error message for a non-const global/static string variable.
nonconst_error_msg = ('Static/global string variables are not permitted.'
' [runtime/string] [4]')
self.TestLint('string foo;',
nonconst_error_msg)
self.TestLint('string kFoo = "hello"; // English',
nonconst_error_msg)
self.TestLint('static string foo;',
nonconst_error_msg)
self.TestLint('static const string foo;',
error_msg % 'static const char foo')
self.TestLint('static const std::string foo;',
error_msg % 'static const char foo')
self.TestLint('string Foo::bar;',
nonconst_error_msg)
self.TestLint('std::string foo;',
nonconst_error_msg)
self.TestLint('std::string kFoo = "hello"; // English',
nonconst_error_msg)
self.TestLint('static std::string foo;',
nonconst_error_msg)
self.TestLint('static const std::string foo;',
error_msg % 'static const char foo')
self.TestLint('std::string Foo::bar;',
nonconst_error_msg)
self.TestLint('::std::string foo;',
nonconst_error_msg)
self.TestLint('::std::string kFoo = "hello"; // English',
nonconst_error_msg)
self.TestLint('static ::std::string foo;',
nonconst_error_msg)
self.TestLint('static const ::std::string foo;',
error_msg % 'static const char foo')
self.TestLint('::std::string Foo::bar;',
nonconst_error_msg)
self.TestLint('string* pointer', '')
self.TestLint('string *pointer', '')
self.TestLint('string* pointer = Func();', '')
self.TestLint('string *pointer = Func();', '')
self.TestLint('const string* pointer', '')
self.TestLint('const string *pointer', '')
self.TestLint('const string* pointer = Func();', '')
self.TestLint('const string *pointer = Func();', '')
self.TestLint('string const* pointer', '')
self.TestLint('string const *pointer', '')
self.TestLint('string const* pointer = Func();', '')
self.TestLint('string const *pointer = Func();', '')
self.TestLint('string* const pointer', '')
self.TestLint('string *const pointer', '')
self.TestLint('string* const pointer = Func();', '')
self.TestLint('string *const pointer = Func();', '')
self.TestLint('string Foo::bar() {}', '')
self.TestLint('string Foo::operator*() {}', '')
# Rare case.
self.TestLint('string foo("foobar");', nonconst_error_msg)
# Should not catch local or member variables.
self.TestLint(' string foo', '')
# Should not catch functions.
self.TestLint('string EmptyString() { return ""; }', '')
self.TestLint('string EmptyString () { return ""; }', '')
self.TestLint('string const& FileInfo::Pathname() const;', '')
self.TestLint('string const &FileInfo::Pathname() const;', '')
self.TestLint('string VeryLongNameFunctionSometimesEndsWith(\n'
' VeryLongNameType very_long_name_variable) {}', '')
self.TestLint('template<>\n'
'string FunctionTemplateSpecialization<SomeType>(\n'
' int x) { return ""; }', '')
self.TestLint('template<>\n'
'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
' int x) { return ""; }', '')
# Should not catch methods of template classes.
self.TestLint('string Class<Type>::Method() const {\n'
' return "";\n'
'}\n', '')
self.TestLint('string Class<Type>::Method(\n'
' int arg) const {\n'
' return "";\n'
'}\n', '')
# Check multiline cases.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'string Class',
'::MemberFunction1();',
'string Class::',
'MemberFunction2();',
'string Class::',
'NestedClass::MemberFunction3();',
'string TemplateClass<T>::',
'NestedClass::MemberFunction4();',
'const string Class',
'::static_member_variable1;',
'const string Class::',
'static_member_variable2;',
'const string Class',
'::static_member_variable3 = "initial value";',
'const string Class::',
'static_member_variable4 = "initial value";',
'string Class::',
'static_member_variable5;',
''],
error_collector)
self.assertEquals(error_collector.Results(),
[error_msg % 'const char Class::static_member_variable1',
error_msg % 'const char Class::static_member_variable2',
error_msg % 'const char Class::static_member_variable3',
error_msg % 'const char Class::static_member_variable4',
nonconst_error_msg])
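# Sketch (an addition, not from the original suite): the replacement
# suggested by error_msg is a plain char array, which should pass
# cleanly.
self.TestLint('static const char foo[] = "hello";', '')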
def testNoSpacesInFunctionCalls(self):
self.TestLint('TellStory(1, 3);',
'')
self.TestLint('TellStory(1, 3 );',
'Extra space before )'
' [whitespace/parens] [2]')
self.TestLint('TellStory(1 /* wolf */, 3 /* pigs */);',
'')
self.TestMultiLineLint("""TellStory(1, 3
);""",
'Closing ) should be moved to the previous line'
' [whitespace/parens] [2]')
self.TestMultiLineLint("""TellStory(Wolves(1),
Pigs(3
));""",
'Closing ) should be moved to the previous line'
' [whitespace/parens] [2]')
self.TestMultiLineLint("""TellStory(1,
3 );""",
'Extra space before )'
' [whitespace/parens] [2]')
def testToDoComments(self):
start_space = ('Too many spaces before TODO'
' [whitespace/todo] [2]')
missing_username = ('Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."'
' [readability/todo] [2]')
end_space = ('TODO(my_username) should be followed by a space'
' [whitespace/todo] [2]')
self.TestLint('// TODOfix this',
[start_space, missing_username, end_space])
self.TestLint('// TODO(ljenkins)fix this',
[start_space, end_space])
self.TestLint('// TODO fix this',
[start_space, missing_username])
self.TestLint('// TODO fix this', missing_username)
self.TestLint('// TODO: fix this', missing_username)
self.TestLint('//TODO(ljenkins): Fix this',
'Should have a space between // and comment'
' [whitespace/comments] [4]')
self.TestLint('// TODO(ljenkins):Fix this', end_space)
self.TestLint('// TODO(ljenkins):', '')
self.TestLint('// TODO(ljenkins): fix this', '')
self.TestLint('// TODO(ljenkins): Fix this', '')
self.TestLint('#if 1 // TEST_URLTODOCID_WHICH_HAS_THAT_WORD_IN_IT_H_', '')
self.TestLint('// See also similar TODO above', '')
self.TestLint(r'EXPECT_EQ("\\", '
r'NormalizePath("/./../foo///bar/..//x/../..", ""));',
'')
def testTwoSpacesBetweenCodeAndComments(self):
self.TestLint('} // namespace foo',
'At least two spaces is best between code and comments'
' [whitespace/comments] [2]')
self.TestLint('}// namespace foo',
'At least two spaces is best between code and comments'
' [whitespace/comments] [2]')
self.TestLint('printf("foo"); // Outside quotes.',
'At least two spaces is best between code and comments'
' [whitespace/comments] [2]')
self.TestLint('int i = 0;  // Having two spaces is fine.', '')
self.TestLint('int i = 0;   // Having three spaces is OK.', '')
self.TestLint('// Top level comment', '')
self.TestLint('  // Line starts with two spaces.', '')
self.TestMultiLineLint('void foo() {\n'
' { // A scope is opening.\n'
' int a;', '')
self.TestMultiLineLint('void foo() {\n'
' { // A scope is opening.\n'
'#define A a',
'At least two spaces is best between code and '
'comments [whitespace/comments] [2]')
self.TestMultiLineLint(' foo();\n'
' { // An indented scope is opening.\n'
' int a;', '')
self.TestMultiLineLint('vector<int> my_elements = {// first\n'
' 1,', '')
self.TestMultiLineLint('vector<int> my_elements = {// my_elements is ..\n'
' 1,',
'At least two spaces is best between code and '
'comments [whitespace/comments] [2]')
self.TestLint('if (foo) { // not a pure scope; comment is too close!',
'At least two spaces is best between code and comments'
' [whitespace/comments] [2]')
self.TestLint('printf("// In quotes.")', '')
self.TestLint('printf("\\"%s // In quotes.")', '')
self.TestLint('printf("%s", "// In quotes.")', '')
def testSpaceAfterCommentMarker(self):
self.TestLint('//', '')
self.TestLint('//x', 'Should have a space between // and comment'
' [whitespace/comments] [4]')
self.TestLint('// x', '')
self.TestLint('///', '')
self.TestLint('/// x', '')
self.TestLint('//!', '')
self.TestLint('//----', '')
self.TestLint('//====', '')
self.TestLint('//////', '')
self.TestLint('////// x', '')
self.TestLint('///< x', '') # After-member Doxygen comment
self.TestLint('//!< x', '') # After-member Doxygen comment
self.TestLint('////x', 'Should have a space between // and comment'
' [whitespace/comments] [4]')
self.TestLint('//}', '')
self.TestLint('//}x', 'Should have a space between // and comment'
' [whitespace/comments] [4]')
self.TestLint('//!<x', 'Should have a space between // and comment'
' [whitespace/comments] [4]')
self.TestLint('///<x', 'Should have a space between // and comment'
' [whitespace/comments] [4]')
# Test a line preceded by empty or comment lines. There was a bug
# that caused it to print the same warning N times if the erroneous
# line was preceded by N lines of empty or comment lines. To be
# precise, the '// marker so line numbers and indices both start at
# 1' line was also causing the issue.
def testLinePrecededByEmptyOrCommentLines(self):
def DoTest(self, lines):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc', lines, error_collector)
# The warning appears only once.
self.assertEquals(
1,
error_collector.Results().count(
'Do not use namespace using-directives. '
'Use using-declarations instead.'
' [build/namespaces] [5]'))
DoTest(self, ['using namespace foo;'])
DoTest(self, ['', '', '', 'using namespace foo;'])
DoTest(self, ['// hello', 'using namespace foo;'])
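# Per the comment above, the '// marker ...' line itself used to
# retrigger the warning; exercising it explicitly is a natural
# extension (sketch, not part of the original suite).
DoTest(self, ['// marker so line numbers and indices both start at 1',
'using namespace foo;'])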
def testNewlineAtEOF(self):
def DoTest(self, data, is_missing_eof):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc', data.split('\n'),
error_collector)
# The warning appears only once.
self.assertEquals(
int(is_missing_eof),
error_collector.Results().count(
'Could not find a newline character at the end of the file.'
' [whitespace/ending_newline] [5]'))
DoTest(self, '// Newline\n// at EOF\n', False)
DoTest(self, '// No newline\n// at EOF', True)
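# Extra sketch case (an addition): a trailing blank line still ends
# with a newline character, so nothing should be reported.
DoTest(self, '// Newline\n// at EOF\n\n', False)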
def testInvalidUtf8(self):
def DoTest(self, raw_bytes, has_invalid_utf8):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
unicode(raw_bytes, 'utf8', 'replace').split('\n'),
error_collector)
# The warning appears only once.
self.assertEquals(
int(has_invalid_utf8),
error_collector.Results().count(
'Line contains invalid UTF-8'
' (or Unicode replacement character).'
' [readability/utf8] [5]'))
DoTest(self, 'Hello world\n', False)
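# '\xe9\x8e\xbd' is a complete three-byte UTF-8 sequence and is
# therefore valid; the next case breaks the sequence with an ASCII 'x',
# which is exactly what the check should flag.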
DoTest(self, '\xe9\x8e\xbd\n', False)
DoTest(self, '\xe9x\x8e\xbd\n', True)
# This is the encoding of the replacement character itself (which
# you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
DoTest(self, '\xef\xbf\xbd\n', True)
def testBadCharacters(self):
# Test for NUL bytes only
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('nul.cc', 'cc',
['// Copyright 2014 Your Company.',
'\0', ''], error_collector)
self.assertEquals(
error_collector.Results(),
'Line contains NUL byte. [readability/nul] [5]')
# Make sure both NUL bytes and invalid UTF-8 are caught if they appear
# on the same line.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'nul_utf8.cc', 'cc',
['// Copyright 2014 Your Company.',
unicode('\xe9x\0', 'utf8', 'replace'), ''],
error_collector)
self.assertEquals(
error_collector.Results(),
['Line contains invalid UTF-8 (or Unicode replacement character).'
' [readability/utf8] [5]',
'Line contains NUL byte. [readability/nul] [5]'])
def testIsBlankLine(self):
self.assert_(cpplint.IsBlankLine(''))
self.assert_(cpplint.IsBlankLine(' '))
self.assert_(cpplint.IsBlankLine(' \t\r\n'))
self.assert_(not cpplint.IsBlankLine('int a;'))
self.assert_(not cpplint.IsBlankLine('{'))
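# Extra sketch cases (additions): comment markers and braces make a
# line non-blank even when surrounded by whitespace.
self.assert_(not cpplint.IsBlankLine(' // comment'))
self.assert_(not cpplint.IsBlankLine(' } '))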
def testBlankLinesCheck(self):
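# The two numeric arguments are the expected counts of the
# start-of-block and end-of-block redundant-blank-line warnings, per
# the TestBlankLinesCheck helper defined earlier in this file.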
self.TestBlankLinesCheck(['{\n', '\n', '\n', '}\n'], 1, 1)
self.TestBlankLinesCheck([' if (foo) {\n', '\n', ' }\n'], 1, 1)
self.TestBlankLinesCheck(
['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
self.TestBlankLinesCheck(['\n', 'run("{");\n', '\n'], 0, 0)
self.TestBlankLinesCheck(['\n', ' if (foo) { return 0; }\n', '\n'], 0, 0)
self.TestBlankLinesCheck(
['int x(\n', ' int a) {\n', '\n', 'return 0;\n', '}'], 0, 0)
self.TestBlankLinesCheck(
['int x(\n', ' int a) const {\n', '\n', 'return 0;\n', '}'], 0, 0)
self.TestBlankLinesCheck(
['int x(\n', ' int a) {\n', '\n', 'return 0;\n', '}'], 1, 0)
self.TestBlankLinesCheck(
['int x(\n', ' int a) {\n', '\n', 'return 0;\n', '}'], 1, 0)
def testAllowBlankLineBeforeClosingNamespace(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['namespace {',
'',
'} // namespace',
'namespace another_namespace {',
'',
'}',
'namespace {',
'',
'template<class T, ',
' class A = hoge<T>, ',
' class B = piyo<T>, ',
' class C = fuga<T> >',
'class D {',
' public:',
'};',
'', '', '', '',
'}'],
error_collector)
self.assertEquals(0, error_collector.Results().count(
'Redundant blank line at the end of a code block should be deleted.'
' [whitespace/blank_line] [3]'))
def testAllowBlankLineBeforeIfElseChain(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['if (hoge) {',
'', # No warning
'} else if (piyo) {',
'', # No warning
'} else if (piyopiyo) {',
' hoge = true;', # No warning
'} else {',
'', # Warning on this line
'}'],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'Redundant blank line at the end of a code block should be deleted.'
' [whitespace/blank_line] [3]'))
def testAllowBlankLineAfterExtern(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['extern "C" {',
'',
'EXPORTAPI void APICALL Some_function() {}',
'',
'}'],
error_collector)
self.assertEquals(0, error_collector.Results().count(
'Redundant blank line at the start of a code block should be deleted.'
' [whitespace/blank_line] [2]'))
self.assertEquals(0, error_collector.Results().count(
'Redundant blank line at the end of a code block should be deleted.'
' [whitespace/blank_line] [3]'))
def testBlankLineBeforeSectionKeyword(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['class A {',
' public:',
' protected:', # warning 1
' private:', # warning 2
' struct B {',
' public:',
' private:'] + # warning 3
([''] * 100) + # Make A and B longer than 100 lines
[' };',
' struct C {',
' protected:',
' private:', # C is too short for warnings
' };',
'};',
'class D',
' : public {',
' public:', # no warning
'};',
'class E {\\',
' public:\\'] +
(['\\'] * 100) + # Makes E > 100 lines
[' int non_empty_line;\\',
' private:\\', # no warning
' int a;\\',
'};'],
error_collector)
self.assertEquals(2, error_collector.Results().count(
'"private:" should be preceded by a blank line'
' [whitespace/blank_line] [3]'))
self.assertEquals(1, error_collector.Results().count(
'"protected:" should be preceded by a blank line'
' [whitespace/blank_line] [3]'))
def testNoBlankLineAfterSectionKeyword(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['class A {',
' public:',
'', # warning 1
' private:',
'', # warning 2
' struct B {',
' protected:',
'', # warning 3
' };',
'};'],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'Do not leave a blank line after "public:"'
' [whitespace/blank_line] [3]'))
self.assertEquals(1, error_collector.Results().count(
'Do not leave a blank line after "protected:"'
' [whitespace/blank_line] [3]'))
self.assertEquals(1, error_collector.Results().count(
'Do not leave a blank line after "private:"'
' [whitespace/blank_line] [3]'))
def testAllowBlankLinesInRawStrings(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['// Copyright 2014 Your Company.',
'static const char *kData[] = {R"(',
'',
')", R"(',
'',
')"};',
''],
error_collector)
self.assertEquals('', error_collector.Results())
def testElseOnSameLineAsClosingBraces(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['if (hoge) {',
'}',
'else if (piyo) {', # Warning on this line
'}',
                             ' else {',  # Warning on this line
'',
'}'],
error_collector)
self.assertEquals(2, error_collector.Results().count(
'An else should appear on the same line as the preceding }'
' [whitespace/newline] [4]'))
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['if (hoge) {',
'',
'}',
'else', # Warning on this line
'{',
'',
'}'],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'An else should appear on the same line as the preceding }'
' [whitespace/newline] [4]'))
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['if (hoge) {',
'',
'}',
'else_function();'],
error_collector)
self.assertEquals(0, error_collector.Results().count(
'An else should appear on the same line as the preceding }'
' [whitespace/newline] [4]'))
def testMultipleStatementsOnSameLine(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['for (int i = 0; i < 1; i++) {}',
'switch (x) {',
' case 0: func(); break; ',
'}',
'sum += MathUtil::SafeIntRound(x); x += 0.1;'],
error_collector)
self.assertEquals(0, error_collector.Results().count(
'More than one command on the same line [whitespace/newline] [0]'))
old_verbose_level = cpplint._cpplint_state.verbose_level
cpplint._cpplint_state.verbose_level = 0
cpplint.ProcessFileData('foo.cc', 'cc',
['sum += MathUtil::SafeIntRound(x); x += 0.1;'],
error_collector)
cpplint._cpplint_state.verbose_level = old_verbose_level
def testEndOfNamespaceComments(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('foo.cc', 'cc',
['namespace {',
'',
'}', # No warning (too short)
'namespace expected {',
'} // namespace mismatched', # Warning here
'namespace {',
'} // namespace mismatched', # Warning here
'namespace outer { namespace nested {'] +
([''] * 10) +
['}', # Warning here
'}', # Warning here
'namespace {'] +
([''] * 10) +
['}', # Warning here
'namespace {'] +
([''] * 10) +
['} // namespace some description', # Anon warning
'namespace {'] +
([''] * 10) +
['} // namespace anonymous', # Variant warning
'namespace {'] +
([''] * 10) +
['} // anonymous namespace (utils)', # Variant
'namespace {'] +
([''] * 10) +
['} // anonymous namespace', # No warning
'namespace missing_comment {'] +
([''] * 10) +
['}', # Warning here
'namespace no_warning {'] +
([''] * 10) +
['} // namespace no_warning',
'namespace no_warning {'] +
([''] * 10) +
['}; // end namespace no_warning',
'#define MACRO \\',
'namespace c_style { \\'] +
(['\\'] * 10) +
['} /* namespace c_style. */ \\',
';'],
error_collector)
self.assertEquals(1, error_collector.Results().count(
'Namespace should be terminated with "// namespace expected"'
' [readability/namespace] [5]'))
self.assertEquals(1, error_collector.Results().count(
'Namespace should be terminated with "// namespace outer"'
' [readability/namespace] [5]'))
self.assertEquals(1, error_collector.Results().count(
'Namespace should be terminated with "// namespace nested"'
' [readability/namespace] [5]'))
self.assertEquals(3, error_collector.Results().count(
'Anonymous namespace should be terminated with "// namespace"'
' [readability/namespace] [5]'))
self.assertEquals(2, error_collector.Results().count(
'Anonymous namespace should be terminated with "// namespace" or'
' "// anonymous namespace"'
' [readability/namespace] [5]'))
self.assertEquals(1, error_collector.Results().count(
'Namespace should be terminated with "// namespace missing_comment"'
' [readability/namespace] [5]'))
self.assertEquals(0, error_collector.Results().count(
'Namespace should be terminated with "// namespace no_warning"'
' [readability/namespace] [5]'))
def testElseClauseNotOnSameLineAsElse(self):
self.TestLint(' else DoSomethingElse();',
'Else clause should never be on same line as else '
'(use 2 lines) [whitespace/newline] [4]')
self.TestLint(' else ifDoSomethingElse();',
'Else clause should never be on same line as else '
'(use 2 lines) [whitespace/newline] [4]')
self.TestLint(' } else if (blah) {', '')
self.TestLint(' variable_ends_in_else = true;', '')
def testComma(self):
self.TestLint('a = f(1,2);',
'Missing space after , [whitespace/comma] [3]')
self.TestLint('int tmp=a,a=b,b=tmp;',
['Missing spaces around = [whitespace/operators] [4]',
'Missing space after , [whitespace/comma] [3]'])
self.TestLint('f(a, /* name */ b);', '')
self.TestLint('f(a, /* name */b);', '')
self.TestLint('f(a, /* name */-1);', '')
self.TestLint('f(a, /* name */"1");', '')
self.TestLint('f(1, /* empty macro arg */, 2)', '')
self.TestLint('f(1,, 2)', '')
self.TestLint('operator,()', '')
self.TestLint('operator,(a,b)',
'Missing space after , [whitespace/comma] [3]')
def testEqualsOperatorSpacing(self):
self.TestLint('int tmp= a;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int tmp =a;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int tmp=a;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int tmp= 7;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int tmp =7;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int tmp=7;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int* tmp=*p;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('int* tmp= *p;',
'Missing spaces around = [whitespace/operators] [4]')
self.TestMultiLineLint(
TrimExtraIndent('''
lookahead_services_=
::strings::Split(FLAGS_ls, ",", ::strings::SkipEmpty());'''),
'Missing spaces around = [whitespace/operators] [4]')
self.TestLint('bool result = a>=42;',
'Missing spaces around >= [whitespace/operators] [3]')
self.TestLint('bool result = a<=42;',
'Missing spaces around <= [whitespace/operators] [3]')
self.TestLint('bool result = a==42;',
'Missing spaces around == [whitespace/operators] [3]')
self.TestLint('auto result = a!=42;',
'Missing spaces around != [whitespace/operators] [3]')
self.TestLint('int a = b!=c;',
'Missing spaces around != [whitespace/operators] [3]')
self.TestLint('a&=42;', '')
self.TestLint('a|=42;', '')
self.TestLint('a^=42;', '')
self.TestLint('a+=42;', '')
self.TestLint('a*=42;', '')
self.TestLint('a/=42;', '')
self.TestLint('a%=42;', '')
self.TestLint('a>>=5;', '')
self.TestLint('a<<=5;', '')
def testShiftOperatorSpacing(self):
self.TestLint('a<<b',
'Missing spaces around << [whitespace/operators] [3]')
self.TestLint('a>>b',
'Missing spaces around >> [whitespace/operators] [3]')
self.TestLint('1<<20', '')
self.TestLint('1024>>10', '')
self.TestLint('Kernel<<<1, 2>>>()', '')
def testIndent(self):
self.TestLint('static int noindent;', '')
self.TestLint(' int two_space_indent;', '')
self.TestLint(' int four_space_indent;', '')
self.TestLint(' int one_space_indent;',
'Weird number of spaces at line-start. '
'Are you using a 2-space indent? [whitespace/indent] [3]')
self.TestLint(' int three_space_indent;',
'Weird number of spaces at line-start. '
'Are you using a 2-space indent? [whitespace/indent] [3]')
self.TestLint(' char* one_space_indent = "public:";',
'Weird number of spaces at line-start. '
'Are you using a 2-space indent? [whitespace/indent] [3]')
self.TestLint(' public:', '')
self.TestLint(' protected:', '')
self.TestLint(' private:', '')
self.TestLint(' protected: \\', '')
self.TestLint(' public: \\', '')
self.TestLint(' private: \\', '')
self.TestMultiLineLint(
TrimExtraIndent("""
class foo {
public slots:
void bar();
};"""),
'Weird number of spaces at line-start. '
'Are you using a 2-space indent? [whitespace/indent] [3]')
self.TestMultiLineLint(
TrimExtraIndent('''
static const char kRawString[] = R"("
")";'''),
'')
self.TestMultiLineLint(
TrimExtraIndent('''
KV<Query,
Tuple<TaxonomyId, PetacatCategoryId, double>>'''),
'')
self.TestMultiLineLint(
' static const char kSingleLineRawString[] = R"(...)";',
'Weird number of spaces at line-start. '
'Are you using a 2-space indent? [whitespace/indent] [3]')
def testSectionIndent(self):
self.TestMultiLineLint(
"""
class A {
public: // no warning
private: // warning here
};""",
'private: should be indented +1 space inside class A'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
class B {
public: // no warning
template<> struct C {
public: // warning here
protected: // no warning
};
};""",
'public: should be indented +1 space inside struct C'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
struct D {
};""",
'Closing brace should be aligned with beginning of struct D'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
template<typename E> class F {
};""",
'Closing brace should be aligned with beginning of class F'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
class G {
Q_OBJECT
public slots:
signals:
};""",
['public slots: should be indented +1 space inside class G'
' [whitespace/indent] [3]',
'signals: should be indented +1 space inside class G'
' [whitespace/indent] [3]'])
self.TestMultiLineLint(
"""
class H {
/* comments */ class I {
public: // no warning
private: // warning here
};
};""",
'private: should be indented +1 space inside class I'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
class J
: public ::K {
public: // no warning
protected: // warning here
};""",
'protected: should be indented +1 space inside class J'
' [whitespace/indent] [3]')
self.TestMultiLineLint(
"""
class L
: public M,
public ::N {
};""",
'')
self.TestMultiLineLint(
"""
template <class O,
class P,
class Q,
typename R>
static void Func() {
}""",
'')
def testConditionals(self):
self.TestMultiLineLint(
"""
if (foo)
goto fail;
goto fail;""",
'If/else bodies with multiple statements require braces'
' [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo)
goto fail; goto fail;""",
'If/else bodies with multiple statements require braces'
' [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo)
foo;
else
goto fail;
goto fail;""",
'If/else bodies with multiple statements require braces'
' [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo) goto fail;
goto fail;""",
'If/else bodies with multiple statements require braces'
' [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo)
if (bar)
baz;
else
qux;""",
'Else clause should be indented at the same level as if. Ambiguous'
' nested if/else chains require braces. [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo)
if (bar)
baz;
else
qux;""",
'Else clause should be indented at the same level as if. Ambiguous'
' nested if/else chains require braces. [readability/braces] [4]')
self.TestMultiLineLint(
"""
if (foo) {
bar;
baz;
} else
qux;""",
'If an else has a brace on one side, it should have it on both'
' [readability/braces] [5]')
self.TestMultiLineLint(
"""
if (foo)
bar;
else {
baz;
}""",
'If an else has a brace on one side, it should have it on both'
' [readability/braces] [5]')
self.TestMultiLineLint(
"""
if (foo)
bar;
else if (baz) {
qux;
}""",
'If an else has a brace on one side, it should have it on both'
' [readability/braces] [5]')
self.TestMultiLineLint(
"""
if (foo) {
bar;
} else if (baz)
qux;""",
'If an else has a brace on one side, it should have it on both'
' [readability/braces] [5]')
self.TestMultiLineLint(
"""
if (foo)
goto fail;
bar;""",
'')
self.TestMultiLineLint(
"""
if (foo
&& bar) {
baz;
qux;
}""",
'')
self.TestMultiLineLint(
"""
if (foo)
goto
fail;""",
'')
self.TestMultiLineLint(
"""
if (foo)
bar;
else
baz;
qux;""",
'')
self.TestMultiLineLint(
"""
for (;;) {
if (foo)
bar;
else
baz;
}""",
'')
self.TestMultiLineLint(
"""
if (foo)
bar;
else if (baz)
baz;""",
'')
self.TestMultiLineLint(
"""
if (foo)
bar;
else
baz;""",
'')
self.TestMultiLineLint(
"""
if (foo) {
bar;
} else {
baz;
}""",
'')
self.TestMultiLineLint(
"""
if (foo) {
bar;
} else if (baz) {
qux;
}""",
'')
# Note: this is an error for a different reason, but should not trigger the
# single-line if error.
self.TestMultiLineLint(
"""
if (foo)
{
bar;
baz;
}""",
'{ should almost always be at the end of the previous line'
' [whitespace/braces] [4]')
self.TestMultiLineLint(
"""
if (foo) { \\
bar; \\
baz; \\
}""",
'')
self.TestMultiLineLint(
"""
void foo() { if (bar) baz; }""",
'')
self.TestMultiLineLint(
"""
#if foo
bar;
#else
baz;
qux;
#endif // foo""",
'')
self.TestMultiLineLint(
"""void F() {
variable = [] { if (true); };
variable =
[] { if (true); };
Call(
[] { if (true); },
[] { if (true); });
}""",
'')
def testTab(self):
self.TestLint('\tint a;',
'Tab found; better to use spaces [whitespace/tab] [1]')
self.TestLint('int a = 5;\t\t// set a to 5',
'Tab found; better to use spaces [whitespace/tab] [1]')
def testParseArguments(self):
old_usage = cpplint._USAGE
old_error_categories = cpplint._ERROR_CATEGORIES
old_output_format = cpplint._cpplint_state.output_format
old_verbose_level = cpplint._cpplint_state.verbose_level
old_filters = cpplint._cpplint_state.filters
old_line_length = cpplint._line_length
old_valid_extensions = cpplint._valid_extensions
try:
      # Don't print usage or the error-category list during the tests.
cpplint._USAGE = ''
cpplint._ERROR_CATEGORIES = ''
self.assertRaises(SystemExit, cpplint.ParseArguments, [])
self.assertRaises(SystemExit, cpplint.ParseArguments, ['--badopt'])
self.assertRaises(SystemExit, cpplint.ParseArguments, ['--help'])
self.assertRaises(SystemExit, cpplint.ParseArguments, ['--v=0'])
self.assertRaises(SystemExit, cpplint.ParseArguments, ['--filter='])
# This is illegal because all filters must start with + or -
self.assertRaises(SystemExit, cpplint.ParseArguments, ['--filter=foo'])
self.assertRaises(SystemExit, cpplint.ParseArguments,
['--filter=+a,b,-c'])
self.assertEquals(['foo.cc'], cpplint.ParseArguments(['foo.cc']))
self.assertEquals(old_output_format, cpplint._cpplint_state.output_format)
self.assertEquals(old_verbose_level, cpplint._cpplint_state.verbose_level)
self.assertEquals(['foo.cc'],
cpplint.ParseArguments(['--v=1', 'foo.cc']))
self.assertEquals(1, cpplint._cpplint_state.verbose_level)
self.assertEquals(['foo.h'],
cpplint.ParseArguments(['--v=3', 'foo.h']))
self.assertEquals(3, cpplint._cpplint_state.verbose_level)
self.assertEquals(['foo.cpp'],
cpplint.ParseArguments(['--verbose=5', 'foo.cpp']))
self.assertEquals(5, cpplint._cpplint_state.verbose_level)
self.assertRaises(ValueError,
cpplint.ParseArguments, ['--v=f', 'foo.cc'])
self.assertEquals(['foo.cc'],
cpplint.ParseArguments(['--output=emacs', 'foo.cc']))
self.assertEquals('emacs', cpplint._cpplint_state.output_format)
self.assertEquals(['foo.h'],
cpplint.ParseArguments(['--output=vs7', 'foo.h']))
self.assertEquals('vs7', cpplint._cpplint_state.output_format)
self.assertRaises(SystemExit,
cpplint.ParseArguments, ['--output=blah', 'foo.cc'])
filt = '-,+whitespace,-whitespace/indent'
self.assertEquals(['foo.h'],
cpplint.ParseArguments(['--filter='+filt, 'foo.h']))
self.assertEquals(['-', '+whitespace', '-whitespace/indent'],
cpplint._cpplint_state.filters)
self.assertEquals(['foo.cc', 'foo.h'],
cpplint.ParseArguments(['foo.cc', 'foo.h']))
self.assertEqual(['foo.h'],
cpplint.ParseArguments(['--linelength=120', 'foo.h']))
self.assertEqual(120, cpplint._line_length)
self.assertEqual(['foo.h'],
cpplint.ParseArguments(['--extensions=hpp,cpp,cpp', 'foo.h']))
self.assertEqual(set(['hpp', 'cpp']), cpplint._valid_extensions)
finally:
cpplint._USAGE = old_usage
cpplint._ERROR_CATEGORIES = old_error_categories
cpplint._cpplint_state.output_format = old_output_format
cpplint._cpplint_state.verbose_level = old_verbose_level
cpplint._cpplint_state.filters = old_filters
cpplint._line_length = old_line_length
cpplint._valid_extensions = old_valid_extensions
def testLineLength(self):
old_line_length = cpplint._line_length
try:
cpplint._line_length = 80
self.TestLint(
'// H %s' % ('H' * 75),
'')
self.TestLint(
'// H %s' % ('H' * 76),
'Lines should be <= 80 characters long'
' [whitespace/line_length] [2]')
cpplint._line_length = 120
self.TestLint(
'// H %s' % ('H' * 115),
'')
self.TestLint(
'// H %s' % ('H' * 116),
'Lines should be <= 120 characters long'
' [whitespace/line_length] [2]')
finally:
cpplint._line_length = old_line_length
def testFilter(self):
old_filters = cpplint._cpplint_state.filters
try:
cpplint._cpplint_state.SetFilters('-,+whitespace,-whitespace/indent')
self.TestLint(
'// Hello there ',
'Line ends in whitespace. Consider deleting these extra spaces.'
' [whitespace/end_of_line] [4]')
self.TestLint('int a = (int)1.0;', '')
self.TestLint(' weird opening space', '')
finally:
cpplint._cpplint_state.filters = old_filters
def testDefaultFilter(self):
default_filters = cpplint._DEFAULT_FILTERS
old_filters = cpplint._cpplint_state.filters
cpplint._DEFAULT_FILTERS = ['-whitespace']
try:
# Reset filters
cpplint._cpplint_state.SetFilters('')
self.TestLint('// Hello there ', '')
cpplint._cpplint_state.SetFilters('+whitespace/end_of_line')
self.TestLint(
'// Hello there ',
'Line ends in whitespace. Consider deleting these extra spaces.'
' [whitespace/end_of_line] [4]')
self.TestLint(' weird opening space', '')
finally:
cpplint._cpplint_state.filters = old_filters
cpplint._DEFAULT_FILTERS = default_filters
def testDuplicateHeader(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData('path/self.cc', 'cc',
['// Copyright 2014 Your Company. All Rights Reserved.',
'#include "path/self.h"',
'#include "path/duplicate.h"',
'#include "path/duplicate.h"',
'#ifdef MACRO',
'#include "path/unique.h"',
'#else',
'#include "path/unique.h"',
'#endif // MACRO',
''],
error_collector)
self.assertEquals(
['"path/duplicate.h" already included at path/self.cc:3 '
'[build/include] [4]'],
error_collector.ResultList())
def testUnnamedNamespacesInHeaders(self):
self.TestLanguageRulesCheck(
'foo.h', 'namespace {',
'Do not use unnamed namespaces in header files. See'
' https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information. [build/namespaces] [4]')
    # Namespace registration macros are OK.
self.TestLanguageRulesCheck('foo.h', 'namespace { \\', '')
    # Named namespaces are OK.
self.TestLanguageRulesCheck('foo.h', 'namespace foo {', '')
self.TestLanguageRulesCheck('foo.h', 'namespace foonamespace {', '')
self.TestLanguageRulesCheck('foo.cc', 'namespace {', '')
self.TestLanguageRulesCheck('foo.cc', 'namespace foo {', '')
def testBuildClass(self):
# Test that the linter can parse to the end of class definitions,
# and that it will report when it can't.
# Use multi-line linter because it performs the ClassState check.
self.TestMultiLineLint(
'class Foo {',
'Failed to find complete declaration of class Foo'
' [build/class] [5]')
# Do the same for namespaces
self.TestMultiLineLint(
'namespace Foo {',
'Failed to find complete declaration of namespace Foo'
' [build/namespaces] [5]')
# Don't warn on forward declarations of various types.
self.TestMultiLineLint(
'class Foo;',
'')
self.TestMultiLineLint(
"""struct Foo*
foo = NewFoo();""",
'')
# Test preprocessor.
self.TestMultiLineLint(
"""#ifdef DERIVE_FROM_GOO
struct Foo : public Goo {
#else
struct Foo : public Hoo {
#endif // DERIVE_FROM_GOO
};""",
'')
self.TestMultiLineLint(
"""
class Foo
#ifdef DERIVE_FROM_GOO
: public Goo {
#else
: public Hoo {
#endif // DERIVE_FROM_GOO
};""",
'')
# Test incomplete class
self.TestMultiLineLint(
'class Foo {',
'Failed to find complete declaration of class Foo'
' [build/class] [5]')
def testBuildEndComment(self):
# The crosstool compiler we currently use will fail to compile the
# code in this test, so we might consider removing the lint check.
self.TestMultiLineLint(
"""#if 0
#endif Not a comment""",
'Uncommented text after #endif is non-standard. Use a comment.'
' [build/endif_comment] [5]')
correct_lines = [
'#endif // text',
'#endif //'
]
for line in correct_lines:
self.TestLint(line, '')
incorrect_lines = [
'#endif',
'#endif Not a comment',
'#endif / One `/` is not enough to start a comment'
]
for line in incorrect_lines:
self.TestLint(
line,
'Uncommented text after #endif is non-standard. Use a comment.'
' [build/endif_comment] [5]')
def testBuildForwardDecl(self):
# The crosstool compiler we currently use will fail to compile the
# code in this test, so we might consider removing the lint check.
self.TestLint('class Foo::Goo;',
'Inner-style forward declarations are invalid.'
' Remove this line.'
' [build/forward_decl] [5]')
def GetBuildHeaderGuardPreprocessorSymbol(self, file_path):
# Figure out the expected header guard by processing an empty file.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h', [], error_collector)
for error in error_collector.ResultList():
matched = re.search(
'No #ifndef header guard found, suggested CPP variable is: '
'([A-Z0-9_]+)',
error)
if matched is not None:
return matched.group(1)
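  # Illustration (derived from the helper above): for a file_path such as
  # 'mydir/foo.h', the guard scraped from the lint message ends in
  # 'MYDIR_FOO_H_', which is exactly what testBuildHeaderGuard asserts below.
  # If no guard message is found, the helper falls through and returns None.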
def testBuildHeaderGuard(self):
file_path = 'mydir/foo.h'
expected_guard = self.GetBuildHeaderGuardPreprocessorSymbol(file_path)
self.assertTrue(re.search('MYDIR_FOO_H_$', expected_guard))
# No guard at all: expect one error.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h', [], error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'No #ifndef header guard found, suggested CPP variable is: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# No header guard, but the error is suppressed.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['// Copyright 2014 Your Company.',
'// NOLINT(build/header_guard)', ''],
error_collector)
self.assertEquals([], error_collector.ResultList())
# Wrong guard
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef FOO_H', '#define FOO_H'], error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#ifndef header guard has wrong style, please use: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# No define
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s' % expected_guard], error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'No #ifndef header guard found, suggested CPP variable is: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# Mismatched define
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s' % expected_guard,
'#define FOO_H'],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'No #ifndef header guard found, suggested CPP variable is: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# No endif
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s' % expected_guard,
'#define %s' % expected_guard,
''],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#endif line should be "#endif // %s"'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# Commentless endif
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s' % expected_guard,
'#define %s' % expected_guard,
'#endif'],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#endif line should be "#endif // %s"'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# Commentless endif for old-style guard
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s_' % expected_guard,
'#define %s_' % expected_guard,
'#endif'],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#endif line should be "#endif // %s"'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# No header guard errors
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s' % expected_guard,
'#define %s' % expected_guard,
'#endif // %s' % expected_guard],
error_collector)
for line in error_collector.ResultList():
if line.find('build/header_guard') != -1:
self.fail('Unexpected error: %s' % line)
# No header guard errors for old-style guard
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s_' % expected_guard,
'#define %s_' % expected_guard,
'#endif // %s_' % expected_guard],
error_collector)
for line in error_collector.ResultList():
if line.find('build/header_guard') != -1:
self.fail('Unexpected error: %s' % line)
old_verbose_level = cpplint._cpplint_state.verbose_level
try:
cpplint._cpplint_state.verbose_level = 0
# Warn on old-style guard if verbosity is 0.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef %s_' % expected_guard,
'#define %s_' % expected_guard,
'#endif // %s_' % expected_guard],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#ifndef header guard has wrong style, please use: %s'
' [build/header_guard] [0]' % expected_guard),
error_collector.ResultList())
finally:
cpplint._cpplint_state.verbose_level = old_verbose_level
# Completely incorrect header guard
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef FOO',
'#define FOO',
'#endif // FOO'],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'#ifndef header guard has wrong style, please use: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
self.assertEquals(
1,
error_collector.ResultList().count(
'#endif line should be "#endif // %s"'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# incorrect header guard with nolint
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'h',
['#ifndef FOO // NOLINT',
'#define FOO',
'#endif // FOO NOLINT'],
error_collector)
self.assertEquals(
0,
error_collector.ResultList().count(
'#ifndef header guard has wrong style, please use: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
self.assertEquals(
0,
error_collector.ResultList().count(
'#endif line should be "#endif // %s"'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
# Special case for flymake
for test_file in ['mydir/foo_flymake.h', 'mydir/.flymake/foo.h']:
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(test_file, 'h',
['// Copyright 2014 Your Company.', ''],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(
'No #ifndef header guard found, suggested CPP variable is: %s'
' [build/header_guard] [5]' % expected_guard),
error_collector.ResultList())
def testBuildHeaderGuardWithRoot(self):
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'cpplint_test_header.h')
file_info = cpplint.FileInfo(file_path)
if file_info.FullName() == file_info.RepositoryName():
# When FileInfo cannot deduce the root directory of the repository,
# FileInfo.RepositoryName returns the same value as FileInfo.FullName.
      # This can happen when this source file was obtained without a .svn or
      # .git directory (e.g. using 'svn export' or 'git archive').
      # Skip this test in such a case because the --root flag makes sense
      # only when the root directory of the repository is properly deduced.
return
self.assertEquals('CPPLINT_CPPLINT_TEST_HEADER_H_',
cpplint.GetHeaderGuardCPPVariable(file_path))
cpplint._root = 'cpplint'
self.assertEquals('CPPLINT_TEST_HEADER_H_',
cpplint.GetHeaderGuardCPPVariable(file_path))
    # The --root flag is ignored if a nonexistent directory is specified.
cpplint._root = 'NON_EXISTENT_DIR'
self.assertEquals('CPPLINT_CPPLINT_TEST_HEADER_H_',
cpplint.GetHeaderGuardCPPVariable(file_path))
def testBuildInclude(self):
# Test that include statements have slashes in them.
self.TestLint('#include "foo.h"',
'Include the directory when naming .h files'
' [build/include] [4]')
self.TestLint('#include "Python.h"', '')
self.TestLint('#include "lua.h"', '')
def testBuildPrintfFormat(self):
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
[r'printf("\%%d", value);',
r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
r'fprintf(file, "\(%d", value);',
r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);'],
error_collector)
self.assertEquals(
4,
error_collector.Results().count(
'%, [, (, and { are undefined character escapes. Unescape them.'
' [build/printf_format] [3]'))
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
'foo.cc', 'cc',
['// Copyright 2014 Your Company.',
r'printf("\\%%%d", value);',
r'printf(R"(\[)");',
r'printf(R"(\[%s)", R"(\])");',
''],
error_collector)
self.assertEquals('', error_collector.Results())
def testRuntimePrintfFormat(self):
self.TestLint(
r'fprintf(file, "%q", value);',
'%q in format strings is deprecated. Use %ll instead.'
' [runtime/printf_format] [3]')
self.TestLint(
r'aprintf(file, "The number is %12q", value);',
'%q in format strings is deprecated. Use %ll instead.'
' [runtime/printf_format] [3]')
self.TestLint(
r'printf(file, "The number is" "%-12q", value);',
'%q in format strings is deprecated. Use %ll instead.'
' [runtime/printf_format] [3]')
self.TestLint(
r'printf(file, "The number is" "%+12q", value);',
'%q in format strings is deprecated. Use %ll instead.'
' [runtime/printf_format] [3]')
self.TestLint(
r'printf(file, "The number is" "% 12q", value);',
'%q in format strings is deprecated. Use %ll instead.'
' [runtime/printf_format] [3]')
self.TestLint(
r'snprintf(file, "Never mix %d and %1$d parameters!", value);',
'%N$ formats are unconventional. Try rewriting to avoid them.'
' [runtime/printf_format] [2]')
def TestLintLogCodeOnError(self, code, expected_message):
# Special TestLint which logs the input code on error.
result = self.PerformSingleLineLint(code)
if result != expected_message:
self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
% (code, result, expected_message))
def testBuildStorageClass(self):
qualifiers = [None, 'const', 'volatile']
signs = [None, 'signed', 'unsigned']
types = ['void', 'char', 'int', 'float', 'double',
'schar', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64']
storage_classes = ['extern', 'register', 'static', 'typedef']
build_storage_class_error_message = (
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration. [build/storage_class] [5]')
# Some explicit cases. Legal in C++, deprecated in C99.
self.TestLint('const int static foo = 5;',
build_storage_class_error_message)
self.TestLint('char static foo;',
build_storage_class_error_message)
self.TestLint('double const static foo = 2.0;',
build_storage_class_error_message)
self.TestLint('uint64 typedef unsigned_long_long;',
build_storage_class_error_message)
self.TestLint('int register foo = 0;',
build_storage_class_error_message)
# Since there are a very large number of possibilities, randomly
# construct declarations.
# Make sure that the declaration is logged if there's an error.
# Seed generator with an integer for absolute reproducibility.
random.seed(25)
for unused_i in range(10):
# Build up random list of non-storage-class declaration specs.
other_decl_specs = [random.choice(qualifiers), random.choice(signs),
random.choice(types)]
# remove None
other_decl_specs = [x for x in other_decl_specs if x is not None]
# shuffle
random.shuffle(other_decl_specs)
# insert storage class after the first
storage_class = random.choice(storage_classes)
insertion_point = random.randint(1, len(other_decl_specs))
decl_specs = (other_decl_specs[0:insertion_point]
+ [storage_class]
+ other_decl_specs[insertion_point:])
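      # Illustration (one hypothetical draw, not a fixed expectation): with
      # other_decl_specs == ['const', 'int'] and 'static' inserted at
      # position 1, the tested declaration is 'const static int;'.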
self.TestLintLogCodeOnError(
' '.join(decl_specs) + ';',
build_storage_class_error_message)
# but no error if storage class is first
self.TestLintLogCodeOnError(
storage_class + ' ' + ' '.join(other_decl_specs),
'')
def testLegalCopyright(self):
legal_copyright_message = (
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"'
' [legal/copyright] [5]')
copyright_line = '// Copyright 2014 Google Inc. All Rights Reserved.'
file_path = 'mydir/googleclient/foo.cc'
# There should be a copyright message in the first 10 lines
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'cc', [], error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(legal_copyright_message))
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
file_path, 'cc',
['' for unused_i in range(10)] + [copyright_line],
error_collector)
self.assertEquals(
1,
error_collector.ResultList().count(legal_copyright_message))
# Test that warning isn't issued if Copyright line appears early enough.
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(file_path, 'cc', [copyright_line], error_collector)
for message in error_collector.ResultList():
if message.find('legal/copyright') != -1:
self.fail('Unexpected error: %s' % message)
error_collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(
file_path, 'cc',
['' for unused_i in range(9)] + [copyright_line],
error_collector)
for message in error_collector.ResultList():
if message.find('legal/copyright') != -1:
self.fail('Unexpected error: %s' % message)
def testInvalidIncrement(self):
self.TestLint('*count++;',
'Changing pointer instead of value (or unused value of '
'operator*). [runtime/invalid_increment] [5]')
def testSnprintfSize(self):
self.TestLint('vsnprintf(NULL, 0, format)', '')
self.TestLint('snprintf(fisk, 1, format)',
'If you can, use sizeof(fisk) instead of 1 as the 2nd arg '
'to snprintf. [runtime/printf] [3]')
class Cxx11Test(CpplintTestBase):
def Helper(self, package, extension, lines, count):
filename = package + '/foo.' + extension
lines = lines[:]
# Header files need to have an ifdef guard wrapped around their code.
if extension == 'h':
guard = filename.upper().replace('/', '_').replace('.', '_') + '_'
lines.insert(0, '#ifndef ' + guard)
lines.insert(1, '#define ' + guard)
lines.append('#endif // ' + guard)
# All files need a final blank line.
lines.append('')
# Process the file and check resulting error count.
collector = ErrorCollector(self.assert_)
cpplint.ProcessFileData(filename, extension, lines, collector)
error_list = collector.ResultList()
self.assertEquals(count, len(error_list), error_list)
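  # Worked example of the guard logic above: for package 'pkg' and extension
  # 'h', the filename is 'pkg/foo.h', so the lines are wrapped in
  # '#ifndef PKG_FOO_H_' / '#define PKG_FOO_H_' ... '#endif // PKG_FOO_H_'.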
def TestCxx11Feature(self, code, expected_error):
lines = code.split('\n')
collector = ErrorCollector(self.assert_)
cpplint.RemoveMultiLineComments('foo.h', lines, collector)
clean_lines = cpplint.CleansedLines(lines)
cpplint.FlagCxx11Features('foo.cc', clean_lines, 0, collector)
self.assertEquals(expected_error, collector.Results())
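  # Note: the helper mirrors cpplint's own preprocessing order; multi-line
  # comments are stripped and strings collapsed before FlagCxx11Features
  # runs, so a blocked name hiding in a comment or string literal should
  # not be reported.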
def testBlockedHeaders(self):
self.TestCxx11Feature('#include <tr1/regex>',
'C++ TR1 headers such as <tr1/regex> are '
'unapproved. [build/c++tr1] [5]')
self.TestCxx11Feature('#include <mutex>',
'<mutex> is an unapproved C++11 header.'
' [build/c++11] [5]')
def testBlockedClasses(self):
self.TestCxx11Feature('std::alignment_of<T>',
'std::alignment_of is an unapproved '
'C++11 class or function. Send c-style an example '
'of where it would make your code more readable, '
'and they may let you use it.'
' [build/c++11] [5]')
    self.TestCxx11Feature('std::alignment_offer', '')  # Near-miss name: OK.
    self.TestCxx11Feature('mystd::alignment_of', '')  # Different namespace.
    self.TestCxx11Feature('std::binomial_distribution', '')  # Not blocked.
def testBlockedFunctions(self):
self.TestCxx11Feature('std::alignment_of<int>',
'std::alignment_of is an unapproved '
'C++11 class or function. Send c-style an example '
'of where it would make your code more readable, '
'and they may let you use it.'
' [build/c++11] [5]')
# Missed because of the lack of "std::". Compiles because ADL
# looks in the namespace of my_shared_ptr, which (presumably) is
# std::. But there will be a lint error somewhere in this file
# since my_shared_ptr had to be defined.
self.TestCxx11Feature('static_pointer_cast<Base>(my_shared_ptr)', '')
self.TestCxx11Feature('std::declval<T>()', '')
def testExplicitMakePair(self):
self.TestLint('make_pair', '')
self.TestLint('make_pair(42, 42)', '')
self.TestLint('make_pair<',
'For C++11-compatibility, omit template arguments from'
' make_pair OR use pair directly OR if appropriate,'
' construct a pair directly'
' [build/explicit_make_pair] [4]')
self.TestLint('make_pair <',
'For C++11-compatibility, omit template arguments from'
' make_pair OR use pair directly OR if appropriate,'
' construct a pair directly'
' [build/explicit_make_pair] [4]')
self.TestLint('my_make_pair<int, int>', '')
class Cxx14Test(CpplintTestBase):
def TestCxx14Feature(self, code, expected_error):
lines = code.split('\n')
collector = ErrorCollector(self.assert_)
cpplint.RemoveMultiLineComments('foo.h', lines, collector)
clean_lines = cpplint.CleansedLines(lines)
cpplint.FlagCxx14Features('foo.cc', clean_lines, 0, collector)
self.assertEquals(expected_error, collector.Results())
def testBlockedHeaders(self):
self.TestCxx14Feature('#include <scoped_allocator>',
'<scoped_allocator> is an unapproved C++14 header.'
' [build/c++14] [5]')
self.TestCxx14Feature('#include <shared_mutex>',
'<shared_mutex> is an unapproved C++14 header.'
' [build/c++14] [5]')
class CleansedLinesTest(unittest.TestCase):
def testInit(self):
lines = ['Line 1',
'Line 2',
'Line 3 // Comment test',
'Line 4 /* Comment test */',
'Line 5 "foo"']
clean_lines = cpplint.CleansedLines(lines)
self.assertEquals(lines, clean_lines.raw_lines)
self.assertEquals(5, clean_lines.NumLines())
self.assertEquals(['Line 1',
'Line 2',
'Line 3',
'Line 4',
'Line 5 "foo"'],
clean_lines.lines)
self.assertEquals(['Line 1',
'Line 2',
'Line 3',
'Line 4',
'Line 5 ""'],
clean_lines.elided)
def testInitEmpty(self):
clean_lines = cpplint.CleansedLines([])
self.assertEquals([], clean_lines.raw_lines)
self.assertEquals(0, clean_lines.NumLines())
def testCollapseStrings(self):
collapse = cpplint.CleansedLines._CollapseStrings
self.assertEquals('""', collapse('""')) # "" (empty)
self.assertEquals('"""', collapse('"""')) # """ (bad)
self.assertEquals('""', collapse('"xyz"')) # "xyz" (string)
self.assertEquals('""', collapse('"\\\""')) # "\"" (string)
self.assertEquals('""', collapse('"\'"')) # "'" (string)
self.assertEquals('"\"', collapse('"\"')) # "\" (bad)
self.assertEquals('""', collapse('"\\\\"')) # "\\" (string)
self.assertEquals('"', collapse('"\\\\\\"')) # "\\\" (bad)
self.assertEquals('""', collapse('"\\\\\\\\"')) # "\\\\" (string)
self.assertEquals('\'\'', collapse('\'\'')) # '' (empty)
self.assertEquals('\'\'', collapse('\'a\'')) # 'a' (char)
self.assertEquals('\'\'', collapse('\'\\\'\'')) # '\'' (char)
self.assertEquals('\'', collapse('\'\\\'')) # '\' (bad)
self.assertEquals('', collapse('\\012')) # '\012' (char)
self.assertEquals('', collapse('\\xfF0')) # '\xfF0' (char)
self.assertEquals('', collapse('\\n')) # '\n' (char)
self.assertEquals(r'\#', collapse('\\#')) # '\#' (bad)
self.assertEquals('"" + ""', collapse('"\'" + "\'"'))
self.assertEquals("'', ''", collapse("'\"', '\"'"))
self.assertEquals('""[0b10]', collapse('"a\'b"[0b1\'0]'))
self.assertEquals('42', collapse("4'2"))
self.assertEquals('0b0101', collapse("0b0'1'0'1"))
self.assertEquals('1048576', collapse("1'048'576"))
self.assertEquals('0X100000', collapse("0X10'0000"))
self.assertEquals('0004000000', collapse("0'004'000'000"))
self.assertEquals('1.602176565e-19', collapse("1.602'176'565e-19"))
self.assertEquals('\'\' + 0xffff', collapse("'i' + 0xf'f'f'f"))
self.assertEquals('sizeof\'\' == 1', collapse("sizeof'x' == 1"))
self.assertEquals('0x.03p100', collapse('0x.0\'3p1\'0\'0'))
self.assertEquals('123.45', collapse('1\'23.4\'5'))
self.assertEquals('StringReplace(body, "", "");',
collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
self.assertEquals('\'\' ""',
collapse('\'"\' "foo"'))
class OrderOfIncludesTest(CpplintTestBase):
def setUp(self):
CpplintTestBase.setUp(self)
self.include_state = cpplint._IncludeState()
    os.path.abspath = lambda value: value  # Stub abspath; use paths verbatim.
def testCheckNextIncludeOrder_OtherThenCpp(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._OTHER_HEADER))
self.assertEqual('Found C++ system header after other header',
self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
def testCheckNextIncludeOrder_CppThenC(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
self.assertEqual('Found C system header after C++ system header',
self.include_state.CheckNextIncludeOrder(
cpplint._C_SYS_HEADER))
def testCheckNextIncludeOrder_LikelyThenCpp(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._LIKELY_MY_HEADER))
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
def testCheckNextIncludeOrder_PossibleThenCpp(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._POSSIBLE_MY_HEADER))
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
def testCheckNextIncludeOrder_CppThenLikely(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
    # A likely-my-header after a C++ system header is tolerated for now,
    # but this will eventually fail.
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._LIKELY_MY_HEADER))
def testCheckNextIncludeOrder_CppThenPossible(self):
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._CPP_SYS_HEADER))
self.assertEqual('', self.include_state.CheckNextIncludeOrder(
cpplint._POSSIBLE_MY_HEADER))
def testClassifyInclude(self):
file_info = cpplint.FileInfo
classify_include = cpplint._ClassifyInclude
self.assertEqual(cpplint._C_SYS_HEADER,
classify_include(file_info('foo/foo.cc'),
'stdio.h',
True))
self.assertEqual(cpplint._CPP_SYS_HEADER,
classify_include(file_info('foo/foo.cc'),
'string',
True))
self.assertEqual(cpplint._CPP_SYS_HEADER,
classify_include(file_info('foo/foo.cc'),
'typeinfo',
True))
self.assertEqual(cpplint._OTHER_HEADER,
classify_include(file_info('foo/foo.cc'),
'string',
False))
self.assertEqual(cpplint._LIKELY_MY_HEADER,
classify_include(file_info('foo/foo.cc'),
'foo/foo-inl.h',
False))
self.assertEqual(cpplint._LIKELY_MY_HEADER,
classify_include(file_info('foo/internal/foo.cc'),
'foo/public/foo.h',
False))
self.assertEqual(cpplint._POSSIBLE_MY_HEADER,
classify_include(file_info('foo/internal/foo.cc'),
'foo/other/public/foo.h',
False))
self.assertEqual(cpplint._OTHER_HEADER,
classify_include(file_info('foo/internal/foo.cc'),
'foo/other/public/foop.h',
False))
def testTryDropCommonSuffixes(self):
self.assertEqual('foo/foo', cpplint._DropCommonSuffixes('foo/foo-inl.h'))
self.assertEqual('foo/bar/foo',
cpplint._DropCommonSuffixes('foo/bar/foo_inl.h'))
self.assertEqual('foo/foo', cpplint._DropCommonSuffixes('foo/foo.cc'))
self.assertEqual('foo/foo_unusualinternal',
cpplint._DropCommonSuffixes('foo/foo_unusualinternal.h'))
self.assertEqual('',
cpplint._DropCommonSuffixes('_test.cc'))
self.assertEqual('test',
cpplint._DropCommonSuffixes('test.cc'))
def testRegression(self):
def Format(includes):
include_list = []
for item in includes:
if item.startswith('"') or item.startswith('<'):
include_list.append('#include %s\n' % item)
else:
include_list.append(item + '\n')
return ''.join(include_list)
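    # For instance, Format(['"a/a.h"', '<string>', 'MACRO']) yields:
    #   '#include "a/a.h"\n#include <string>\nMACRO\n'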
# Test singleton cases first.
self.TestLanguageRulesCheck('foo/foo.cc', Format(['"foo/foo.h"']), '')
self.TestLanguageRulesCheck('foo/foo.cc', Format(['<stdio.h>']), '')
self.TestLanguageRulesCheck('foo/foo.cc', Format(['<string>']), '')
self.TestLanguageRulesCheck('foo/foo.cc', Format(['"foo/foo-inl.h"']), '')
self.TestLanguageRulesCheck('foo/foo.cc', Format(['"bar/bar-inl.h"']), '')
self.TestLanguageRulesCheck('foo/foo.cc', Format(['"bar/bar.h"']), '')
# Test everything in a good and new order.
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['"foo/foo.h"',
'"foo/foo-inl.h"',
'<stdio.h>',
'<string>',
'<unordered_map>',
'"bar/bar-inl.h"',
'"bar/bar.h"']),
'')
# Test bad orders.
self.TestLanguageRulesCheck(
'foo/foo.cc',
Format(['<string>', '<stdio.h>']),
'Found C system header after C++ system header.'
' Should be: foo.h, c system, c++ system, other.'
' [build/include_order] [4]')
self.TestLanguageRulesCheck(
'foo/foo.cc',
Format(['"foo/bar-inl.h"',
'"foo/foo-inl.h"']),
'')
self.TestLanguageRulesCheck(
'foo/foo.cc',
Format(['"foo/e.h"',
'"foo/b.h"', # warning here (e>b)
'"foo/c.h"',
'"foo/d.h"',
'"foo/a.h"']), # warning here (d>a)
['Include "foo/b.h" not in alphabetical order'
' [build/include_alpha] [4]',
'Include "foo/a.h" not in alphabetical order'
' [build/include_alpha] [4]'])
# -inl.h headers are no longer special.
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['"foo/foo-inl.h"', '<string>']),
'')
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['"foo/bar.h"', '"foo/bar-inl.h"']),
'')
# Test componentized header. OK to have my header in ../public dir.
self.TestLanguageRulesCheck('foo/internal/foo.cc',
Format(['"foo/public/foo.h"', '<string>']),
'')
    # OK to have my header in another dir (not ideal stylistically, but
    # cpplint isn't as good as a human at judging that).
self.TestLanguageRulesCheck('foo/internal/foo.cc',
Format(['"foo/other/public/foo.h"',
'<string>']),
'')
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['"foo/foo.h"',
'<string>',
'"base/google.h"',
'"base/flags.h"']),
'Include "base/flags.h" not in alphabetical '
'order [build/include_alpha] [4]')
# According to the style, -inl.h should come before .h, but we don't
# complain about that.
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['"foo/foo-inl.h"',
'"foo/foo.h"',
'"base/google.h"',
'"base/google-inl.h"']),
'')
# Allow project includes to be separated by blank lines
self.TestLanguageRulesCheck('a/a.cc',
Format(['"a/a.h"',
'<string>',
'"base/google.h"',
'',
'"b/c.h"',
'',
'MACRO',
'"a/b.h"']),
'')
self.TestLanguageRulesCheck('a/a.cc',
Format(['"a/a.h"',
'<string>',
'"base/google.h"',
'"a/b.h"']),
'Include "a/b.h" not in alphabetical '
'order [build/include_alpha] [4]')
# Test conditional includes
self.TestLanguageRulesCheck(
'a/a.cc',
''.join(['#include <string.h>\n',
'#include "base/port.h"\n',
'#include <initializer_list>\n']),
('Found C++ system header after other header. '
'Should be: a.h, c system, c++ system, other. '
'[build/include_order] [4]'))
self.TestLanguageRulesCheck(
'a/a.cc',
''.join(['#include <string.h>\n',
'#include "base/port.h"\n',
'#ifdef LANG_CXX11\n',
'#include <initializer_list>\n',
'#endif // LANG_CXX11\n']),
'')
self.TestLanguageRulesCheck(
'a/a.cc',
''.join(['#include <string.h>\n',
'#ifdef LANG_CXX11\n',
'#include "base/port.h"\n',
'#include <initializer_list>\n',
'#endif // LANG_CXX11\n']),
('Found C++ system header after other header. '
'Should be: a.h, c system, c++ system, other. '
'[build/include_order] [4]'))
# Third party headers are exempt from order checks
self.TestLanguageRulesCheck('foo/foo.cc',
Format(['<string>', '"Python.h"', '<vector>']),
'')
class CheckForFunctionLengthsTest(CpplintTestBase):
def setUp(self):
    # Reduce these thresholds so the tests run significantly faster.
self.old_normal_trigger = cpplint._FunctionState._NORMAL_TRIGGER
self.old_test_trigger = cpplint._FunctionState._TEST_TRIGGER
cpplint._FunctionState._NORMAL_TRIGGER = 10
cpplint._FunctionState._TEST_TRIGGER = 25
def tearDown(self):
cpplint._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger
cpplint._FunctionState._TEST_TRIGGER = self.old_test_trigger
def TestFunctionLengthsCheck(self, code, expected_message):
"""Check warnings for long function bodies are as expected.
Args:
code: C++ source code expected to generate a warning message.
expected_message: Message expected to be generated by the C++ code.
"""
self.assertEquals(expected_message,
self.PerformFunctionLengthsCheck(code))
def TriggerLines(self, error_level):
"""Return number of lines needed to trigger a function length warning.
Args:
error_level: --v setting for cpplint.
Returns:
Number of lines needed to trigger a function length warning.
"""
return cpplint._FunctionState._NORMAL_TRIGGER * 2**error_level
def TestLines(self, error_level):
"""Return number of lines needed to trigger a test function length warning.
Args:
error_level: --v setting for cpplint.
Returns:
Number of lines needed to trigger a test function length warning.
"""
return cpplint._FunctionState._TEST_TRIGGER * 2**error_level
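  # Worked arithmetic for the two helpers above (using the reduced triggers
  # from setUp): TriggerLines(0) == 10, TriggerLines(3) == 80, and
  # TestLines(1) == 50; the fn_size threshold doubles per --v level.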
def TestFunctionLengthCheckDefinition(self, lines, error_level):
"""Generate long function definition and check warnings are as expected.
Args:
lines: Number of lines to generate.
error_level: --v setting for cpplint.
"""
trigger_level = self.TriggerLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
'void test(int x)' + self.FunctionBody(lines),
('Small and focused functions are preferred: '
'test() has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]'
% (lines, trigger_level, error_level)))
def TestFunctionLengthCheckDefinitionOK(self, lines):
"""Generate shorter function definition and check no warning is produced.
Args:
lines: Number of lines to generate.
"""
self.TestFunctionLengthsCheck(
'void test(int x)' + self.FunctionBody(lines),
'')
def TestFunctionLengthCheckAtErrorLevel(self, error_level):
"""Generate and check function at the trigger level for --v setting.
Args:
error_level: --v setting for cpplint.
"""
self.TestFunctionLengthCheckDefinition(self.TriggerLines(error_level),
error_level)
def TestFunctionLengthCheckBelowErrorLevel(self, error_level):
"""Generate and check function just below the trigger level for --v setting.
Args:
error_level: --v setting for cpplint.
"""
self.TestFunctionLengthCheckDefinition(self.TriggerLines(error_level)-1,
error_level-1)
def TestFunctionLengthCheckAboveErrorLevel(self, error_level):
"""Generate and check function just above the trigger level for --v setting.
Args:
error_level: --v setting for cpplint.
"""
self.TestFunctionLengthCheckDefinition(self.TriggerLines(error_level)+1,
error_level)
def FunctionBody(self, number_of_lines):
return ' {\n' + ' this_is_just_a_test();\n'*number_of_lines + '}'
def FunctionBodyWithBlankLines(self, number_of_lines):
return ' {\n' + ' this_is_just_a_test();\n\n'*number_of_lines + '}'
def FunctionBodyWithNoLints(self, number_of_lines):
return (' {\n' +
' this_is_just_a_test(); // NOLINT\n'*number_of_lines + '}')
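  # For reference, FunctionBody(2) expands to
  # ' {\n  this_is_just_a_test();\n  this_is_just_a_test();\n}';
  # each repetition contributes one countable non-comment body line.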
# Test line length checks.
def testFunctionLengthCheckDeclaration(self):
self.TestFunctionLengthsCheck(
'void test();', # Not a function definition
'')
def testFunctionLengthCheckDeclarationWithBlockFollowing(self):
self.TestFunctionLengthsCheck(
('void test();\n'
+ self.FunctionBody(66)), # Not a function definition
'')
def testFunctionLengthCheckClassDefinition(self):
self.TestFunctionLengthsCheck( # Not a function definition
'class Test' + self.FunctionBody(66) + ';',
'')
def testFunctionLengthCheckTrivial(self):
self.TestFunctionLengthsCheck(
'void test() {}', # Not counted
'')
def testFunctionLengthCheckEmpty(self):
self.TestFunctionLengthsCheck(
'void test() {\n}',
'')
def testFunctionLengthCheckDefinitionBelowSeverity0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckDefinitionOK(self.TriggerLines(0)-1)
cpplint._SetVerboseLevel(old_verbosity)
def testFunctionLengthCheckDefinitionAtSeverity0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckDefinitionOK(self.TriggerLines(0))
cpplint._SetVerboseLevel(old_verbosity)
def testFunctionLengthCheckDefinitionAboveSeverity0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckAboveErrorLevel(0)
cpplint._SetVerboseLevel(old_verbosity)
def testFunctionLengthCheckDefinitionBelowSeverity1v0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckBelowErrorLevel(1)
cpplint._SetVerboseLevel(old_verbosity)
def testFunctionLengthCheckDefinitionAtSeverity1v0(self):
old_verbosity = cpplint._SetVerboseLevel(0)
self.TestFunctionLengthCheckAtErrorLevel(1)
cpplint._SetVerboseLevel(old_verbosity)
def testFunctionLengthCheckDefinitionBelowSeverity1(self):
self.TestFunctionLengthCheckDefinitionOK(self.TriggerLines(1)-1)
def testFunctionLengthCheckDefinitionAtSeverity1(self):
self.TestFunctionLengthCheckDefinitionOK(self.TriggerLines(1))
def testFunctionLengthCheckDefinitionAboveSeverity1(self):
self.TestFunctionLengthCheckAboveErrorLevel(1)
def testFunctionLengthCheckDefinitionSeverity1PlusBlanks(self):
error_level = 1
error_lines = self.TriggerLines(error_level) + 1
trigger_level = self.TriggerLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
'void test_blanks(int x)' + self.FunctionBody(error_lines),
('Small and focused functions are preferred: '
'test_blanks() has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines, trigger_level, error_level))
def testFunctionLengthCheckComplexDefinitionSeverity1(self):
error_level = 1
error_lines = self.TriggerLines(error_level) + 1
trigger_level = self.TriggerLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
('my_namespace::my_other_namespace::MyVeryLongTypeName*\n'
'my_namespace::my_other_namespace::MyFunction(int arg1, char* arg2)'
+ self.FunctionBody(error_lines)),
('Small and focused functions are preferred: '
'my_namespace::my_other_namespace::MyFunction()'
' has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines, trigger_level, error_level))
def testFunctionLengthCheckDefinitionSeverity1ForTest(self):
error_level = 1
error_lines = self.TestLines(error_level) + 1
trigger_level = self.TestLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
'TEST_F(Test, Mutator)' + self.FunctionBody(error_lines),
('Small and focused functions are preferred: '
'TEST_F(Test, Mutator) has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines, trigger_level, error_level))
def testFunctionLengthCheckDefinitionSeverity1ForSplitLineTest(self):
error_level = 1
error_lines = self.TestLines(error_level) + 1
trigger_level = self.TestLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n'
' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces
+ self.FunctionBody(error_lines)),
('Small and focused functions are preferred: '
'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, ' # 1 space
'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines+1, trigger_level, error_level))
def testFunctionLengthCheckDefinitionSeverity1ForBadTestDoesntBreak(self):
error_level = 1
error_lines = self.TestLines(error_level) + 1
trigger_level = self.TestLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
('TEST_F('
+ self.FunctionBody(error_lines)),
('Small and focused functions are preferred: '
'TEST_F has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines, trigger_level, error_level))
def testFunctionLengthCheckDefinitionSeverity1WithEmbeddedNoLints(self):
error_level = 1
error_lines = self.TriggerLines(error_level)+1
trigger_level = self.TriggerLines(cpplint._VerboseLevel())
self.TestFunctionLengthsCheck(
'void test(int x)' + self.FunctionBodyWithNoLints(error_lines),
('Small and focused functions are preferred: '
'test() has %d non-comment lines '
'(error triggered by exceeding %d lines).'
' [readability/fn_size] [%d]')
% (error_lines, trigger_level, error_level))
def testFunctionLengthCheckDefinitionSeverity1WithNoLint(self):
self.TestFunctionLengthsCheck(
('void test(int x)' + self.FunctionBody(self.TriggerLines(1))
+ ' // NOLINT -- long function'),
'')
def testFunctionLengthCheckDefinitionBelowSeverity2(self):
self.TestFunctionLengthCheckBelowErrorLevel(2)
def testFunctionLengthCheckDefinitionSeverity2(self):
self.TestFunctionLengthCheckAtErrorLevel(2)
def testFunctionLengthCheckDefinitionAboveSeverity2(self):
self.TestFunctionLengthCheckAboveErrorLevel(2)
def testFunctionLengthCheckDefinitionBelowSeverity3(self):
self.TestFunctionLengthCheckBelowErrorLevel(3)
def testFunctionLengthCheckDefinitionSeverity3(self):
self.TestFunctionLengthCheckAtErrorLevel(3)
def testFunctionLengthCheckDefinitionAboveSeverity3(self):
self.TestFunctionLengthCheckAboveErrorLevel(3)
def testFunctionLengthCheckDefinitionBelowSeverity4(self):
self.TestFunctionLengthCheckBelowErrorLevel(4)
def testFunctionLengthCheckDefinitionSeverity4(self):
self.TestFunctionLengthCheckAtErrorLevel(4)
def testFunctionLengthCheckDefinitionAboveSeverity4(self):
self.TestFunctionLengthCheckAboveErrorLevel(4)
def testFunctionLengthCheckDefinitionBelowSeverity5(self):
self.TestFunctionLengthCheckBelowErrorLevel(5)
def testFunctionLengthCheckDefinitionAtSeverity5(self):
self.TestFunctionLengthCheckAtErrorLevel(5)
def testFunctionLengthCheckDefinitionAboveSeverity5(self):
self.TestFunctionLengthCheckAboveErrorLevel(5)
def testFunctionLengthCheckDefinitionHugeLines(self):
# 5 is the limit
self.TestFunctionLengthCheckDefinition(self.TriggerLines(10), 5)
def testFunctionLengthNotDeterminable(self):
# Macro invocation without terminating semicolon.
self.TestFunctionLengthsCheck(
'MACRO(arg)',
'')
# Macro with underscores
self.TestFunctionLengthsCheck(
'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)',
'')
self.TestFunctionLengthsCheck(
'NonMacro(arg)',
'Lint failed to find start of function body.'
' [readability/fn_size] [5]')
def testFunctionLengthCheckWithNamespace(self):
old_verbosity = cpplint._SetVerboseLevel(1)
self.TestFunctionLengthsCheck(
('namespace {\n'
'void CodeCoverageCL35256059() {\n' +
(' X++;\n' * 3000) +
'}\n'
'} // namespace\n'),
('Small and focused functions are preferred: '
'CodeCoverageCL35256059() has 3000 non-comment lines '
'(error triggered by exceeding 20 lines).'
' [readability/fn_size] [5]'))
cpplint._SetVerboseLevel(old_verbosity)
def TrimExtraIndent(text_block):
"""Trim a uniform amount of whitespace off of each line in a string.
  Compute the minimum indent on all non-blank lines and trim that from each, so
that the block of text has no extra indentation.
Args:
text_block: a multiline string
Returns:
text_block with the common whitespace indent of each line removed.
"""
def CountLeadingWhitespace(s):
count = 0
for c in s:
if not c.isspace():
break
count += 1
return count
# find the minimum indent (except for blank lines)
min_indent = min([CountLeadingWhitespace(line)
for line in text_block.split('\n') if line])
return '\n'.join([line[min_indent:] for line in text_block.split('\n')])
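# A minimal illustration of TrimExtraIndent (not part of the original test
# suite): the smallest indent across non-blank lines is stripped from every
# line, so relative indentation is preserved.
assert TrimExtraIndent('    int a;\n      int b;') == 'int a;\n  int b;'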
class CloseExpressionTest(unittest.TestCase):
def setUp(self):
self.lines = cpplint.CleansedLines(
# 1 2 3 4 5
# 0123456789012345678901234567890123456789012345678901234567890
['// Line 0',
'inline RCULocked<X>::ReadPtr::ReadPtr(const RCULocked* rcu) {',
' DCHECK(!(data & kFlagMask)) << "Error";',
'}',
'// Line 4',
'RCULocked<X>::WritePtr::WritePtr(RCULocked* rcu)',
' : lock_(&rcu_->mutex_) {',
'}',
'// Line 8',
'template <typename T, typename... A>',
'typename std::enable_if<',
' std::is_array<T>::value && (std::extent<T>::value > 0)>::type',
'MakeUnique(A&&... a) = delete;',
'// Line 13',
'auto x = []() {};',
'// Line 15',
'template <typename U>',
'friend bool operator==(const reffed_ptr& a,',
' const reffed_ptr<U>& b) {',
' return a.get() == b.get();',
'}',
'// Line 21'])
def testCloseExpression(self):
# List of positions to test:
# (start line, start position, end line, end position + 1)
positions = [(1, 16, 1, 19),
(1, 37, 1, 59),
(1, 60, 3, 1),
(2, 8, 2, 29),
(2, 30, 22, -1), # Left shift operator
(9, 9, 9, 36),
(10, 23, 11, 59),
(11, 54, 22, -1), # Greater than operator
(14, 9, 14, 11),
(14, 11, 14, 13),
(14, 14, 14, 16),
(17, 22, 18, 46),
(18, 47, 20, 1)]
for p in positions:
(_, line, column) = cpplint.CloseExpression(self.lines, p[0], p[1])
self.assertEquals((p[2], p[3]), (line, column))
def testReverseCloseExpression(self):
# List of positions to test:
# (end line, end position, start line, start position)
positions = [(1, 18, 1, 16),
(1, 58, 1, 37),
(2, 27, 2, 10),
(2, 28, 2, 8),
(6, 18, 0, -1), # -> operator
(9, 35, 9, 9),
(11, 54, 0, -1), # Greater than operator
(11, 57, 11, 31),
(14, 10, 14, 9),
(14, 12, 14, 11),
(14, 15, 14, 14),
(18, 45, 17, 22),
(20, 0, 18, 47)]
for p in positions:
(_, line, column) = cpplint.ReverseCloseExpression(self.lines, p[0], p[1])
self.assertEquals((p[2], p[3]), (line, column))
class NestingStateTest(unittest.TestCase):
def setUp(self):
self.nesting_state = cpplint.NestingState()
self.error_collector = ErrorCollector(self.assert_)
def UpdateWithLines(self, lines):
clean_lines = cpplint.CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
self.nesting_state.Update('test.cc',
clean_lines, line, self.error_collector)
def testEmpty(self):
self.UpdateWithLines([])
self.assertEquals(self.nesting_state.stack, [])
def testNamespace(self):
self.UpdateWithLines(['namespace {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0],
cpplint._NamespaceInfo))
self.assertTrue(self.nesting_state.stack[0].seen_open_brace)
self.assertEquals(self.nesting_state.stack[0].name, '')
self.UpdateWithLines(['namespace outer { namespace inner'])
self.assertEquals(len(self.nesting_state.stack), 3)
self.assertTrue(self.nesting_state.stack[0].seen_open_brace)
self.assertTrue(self.nesting_state.stack[1].seen_open_brace)
self.assertFalse(self.nesting_state.stack[2].seen_open_brace)
self.assertEquals(self.nesting_state.stack[0].name, '')
self.assertEquals(self.nesting_state.stack[1].name, 'outer')
self.assertEquals(self.nesting_state.stack[2].name, 'inner')
self.UpdateWithLines(['{'])
self.assertTrue(self.nesting_state.stack[2].seen_open_brace)
self.UpdateWithLines(['}', '}}'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testClass(self):
self.UpdateWithLines(['class A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'A')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.assertEquals(self.nesting_state.stack[0].class_indent, 0)
self.UpdateWithLines(['};',
'struct B : public A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'B')
self.assertTrue(self.nesting_state.stack[0].is_derived)
self.UpdateWithLines(['};',
'class C',
': public A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'C')
self.assertTrue(self.nesting_state.stack[0].is_derived)
self.UpdateWithLines(['};',
'template<T>'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['class D {', ' class E {'])
self.assertEquals(len(self.nesting_state.stack), 2)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.assertTrue(isinstance(self.nesting_state.stack[1], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[1].name, 'E')
self.assertFalse(self.nesting_state.stack[1].is_derived)
self.assertEquals(self.nesting_state.stack[1].class_indent, 2)
self.assertEquals(self.nesting_state.InnermostClass().name, 'E')
self.UpdateWithLines(['}', '}'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testClassAccess(self):
self.UpdateWithLines(['class A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].access, 'private')
self.UpdateWithLines([' public:'])
self.assertEquals(self.nesting_state.stack[0].access, 'public')
self.UpdateWithLines([' protracted:'])
self.assertEquals(self.nesting_state.stack[0].access, 'public')
self.UpdateWithLines([' protected:'])
self.assertEquals(self.nesting_state.stack[0].access, 'protected')
self.UpdateWithLines([' private:'])
self.assertEquals(self.nesting_state.stack[0].access, 'private')
self.UpdateWithLines([' struct B {'])
self.assertEquals(len(self.nesting_state.stack), 2)
self.assertTrue(isinstance(self.nesting_state.stack[1], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[1].access, 'public')
self.assertEquals(self.nesting_state.stack[0].access, 'private')
self.UpdateWithLines([' protected :'])
self.assertEquals(self.nesting_state.stack[1].access, 'protected')
self.assertEquals(self.nesting_state.stack[0].access, 'private')
self.UpdateWithLines([' }', '}'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testStruct(self):
self.UpdateWithLines(['struct A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'A')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.UpdateWithLines(['}',
'void Func(struct B arg) {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertFalse(isinstance(self.nesting_state.stack[0],
cpplint._ClassInfo))
self.UpdateWithLines(['}'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testPreprocessor(self):
self.assertEquals(len(self.nesting_state.pp_stack), 0)
self.UpdateWithLines(['#if MACRO1'])
self.assertEquals(len(self.nesting_state.pp_stack), 1)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.pp_stack), 0)
self.UpdateWithLines(['#ifdef MACRO2'])
self.assertEquals(len(self.nesting_state.pp_stack), 1)
self.UpdateWithLines(['#else'])
self.assertEquals(len(self.nesting_state.pp_stack), 1)
self.UpdateWithLines(['#ifdef MACRO3'])
self.assertEquals(len(self.nesting_state.pp_stack), 2)
self.UpdateWithLines(['#elif MACRO4'])
self.assertEquals(len(self.nesting_state.pp_stack), 2)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.pp_stack), 1)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.pp_stack), 0)
self.UpdateWithLines(['#ifdef MACRO5',
'class A {',
'#elif MACRO6',
'class B {',
'#else',
'class C {',
'#endif'])
self.assertEquals(len(self.nesting_state.pp_stack), 0)
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'A')
self.UpdateWithLines(['};'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['class D',
'#ifdef MACRO7'])
self.assertEquals(len(self.nesting_state.pp_stack), 1)
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.UpdateWithLines(['#elif MACRO8',
': public E'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.assertTrue(self.nesting_state.stack[0].is_derived)
self.assertFalse(self.nesting_state.stack[0].seen_open_brace)
self.UpdateWithLines(['#else',
'{'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.assertTrue(self.nesting_state.stack[0].seen_open_brace)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.pp_stack), 0)
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.assertFalse(self.nesting_state.stack[0].is_derived)
self.assertFalse(self.nesting_state.stack[0].seen_open_brace)
self.UpdateWithLines([';'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testTemplate(self):
self.UpdateWithLines(['template <T,',
' class Arg1 = tmpl<T> >'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['class A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'A')
self.UpdateWithLines(['};',
'template <T,',
' template <typename, typename> class B>',
'class C'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'C')
self.UpdateWithLines([';'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['class D : public Tmpl<E>'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'D')
self.UpdateWithLines(['{', '};'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['template <class F,',
' class G,',
' class H,',
' typename I>',
'static void Func() {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertFalse(isinstance(self.nesting_state.stack[0],
cpplint._ClassInfo))
self.UpdateWithLines(['}',
'template <class J> class K {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'K')
def testTemplateInnerClass(self):
self.UpdateWithLines(['class A {',
' public:'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.UpdateWithLines([' template <class B>',
' class C<alloc<B> >',
' : public A {'])
self.assertEquals(len(self.nesting_state.stack), 2)
self.assertTrue(isinstance(self.nesting_state.stack[1], cpplint._ClassInfo))
def testArguments(self):
self.UpdateWithLines(['class A {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'A')
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.UpdateWithLines([' void Func(',
' struct X arg1,'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.UpdateWithLines([' struct X *arg2);'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.UpdateWithLines(['};'])
self.assertEquals(len(self.nesting_state.stack), 0)
self.UpdateWithLines(['struct B {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertTrue(isinstance(self.nesting_state.stack[0], cpplint._ClassInfo))
self.assertEquals(self.nesting_state.stack[0].name, 'B')
self.UpdateWithLines(['#ifdef MACRO',
' void Func(',
' struct X arg1'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.UpdateWithLines(['#else'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.UpdateWithLines([' void Func(',
' struct X arg1'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.UpdateWithLines([' struct X *arg2);'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.UpdateWithLines(['};'])
self.assertEquals(len(self.nesting_state.stack), 0)
def testInlineAssembly(self):
self.UpdateWithLines(['void CopyRow_SSE2(const uint8* src, uint8* dst,',
' int count) {'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.assertEquals(self.nesting_state.stack[-1].inline_asm, cpplint._NO_ASM)
self.UpdateWithLines([' asm volatile ('])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.assertEquals(self.nesting_state.stack[-1].inline_asm,
cpplint._INSIDE_ASM)
self.UpdateWithLines([' "sub %0,%1 \\n"',
' "1: \\n"',
' "movdqa (%0),%%xmm0 \\n"',
' "movdqa 0x10(%0),%%xmm1 \\n"',
' "movdqa %%xmm0,(%0,%1) \\n"',
' "movdqa %%xmm1,0x10(%0,%1) \\n"',
' "lea 0x20(%0),%0 \\n"',
' "sub $0x20,%2 \\n"',
' "jg 1b \\n"',
' : "+r"(src), // %0',
' "+r"(dst), // %1',
' "+r"(count) // %2',
' :',
' : "memory", "cc"'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.assertEquals(self.nesting_state.stack[-1].inline_asm,
cpplint._INSIDE_ASM)
self.UpdateWithLines(['#if defined(__SSE2__)',
' , "xmm0", "xmm1"'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.assertEquals(self.nesting_state.stack[-1].inline_asm,
cpplint._INSIDE_ASM)
self.UpdateWithLines(['#endif'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 1)
self.assertEquals(self.nesting_state.stack[-1].inline_asm,
cpplint._INSIDE_ASM)
self.UpdateWithLines([' );'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.assertEquals(self.nesting_state.stack[-1].inline_asm, cpplint._END_ASM)
self.UpdateWithLines(['__asm {'])
self.assertEquals(len(self.nesting_state.stack), 2)
self.assertEquals(self.nesting_state.stack[-1].open_parentheses, 0)
self.assertEquals(self.nesting_state.stack[-1].inline_asm,
cpplint._BLOCK_ASM)
self.UpdateWithLines(['}'])
self.assertEquals(len(self.nesting_state.stack), 1)
self.UpdateWithLines(['}'])
self.assertEquals(len(self.nesting_state.stack), 0)
# pylint: disable-msg=C6409
def setUp():
"""Runs before all tests are executed.
"""
# Enable all filters, so we don't miss anything that is off by default.
cpplint._DEFAULT_FILTERS = []
cpplint._cpplint_state.SetFilters('')
# pylint: disable-msg=C6409
def tearDown():
"""A global check to make sure all error-categories have been tested.
The main tearDown() routine is the only code we can guarantee will be
run after all other tests have been executed.
"""
try:
if _run_verifyallcategoriesseen:
ErrorCollector(None).VerifyAllCategoriesAreSeen()
except NameError:
# If nobody set the global _run_verifyallcategoriesseen, then
# we assume we should silently not run the test
pass
if __name__ == '__main__':
# We don't want to run the VerifyAllCategoriesAreSeen() test unless
# we're running the full test suite: if we only run one test,
# obviously we're not going to see all the error categories. So we
# only run VerifyAllCategoriesAreSeen() when no commandline flags
# are passed in.
global _run_verifyallcategoriesseen
_run_verifyallcategoriesseen = (len(sys.argv) == 1)
setUp()
unittest.main()
tearDown()
|
fishpepper/OpenSky
|
stylecheck/cpplint_unittest.py
|
Python
|
gpl-3.0
| 228,992
|
"""
Component to interface with various locks that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/lock/
"""
from datetime import timedelta
import functools as ft
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED,
SERVICE_LOCK, SERVICE_UNLOCK, SERVICE_OPEN)
from homeassistant.components import group
ATTR_CHANGED_BY = 'changed_by'
DOMAIN = 'lock'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_ALL_LOCKS = group.ENTITY_ID_FORMAT.format('all_locks')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_LOCKS = 'all locks'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
LOCK_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_CODE): cv.string,
})
# Bitfield of features supported by the lock entity
SUPPORT_OPEN = 1
_LOGGER = logging.getLogger(__name__)
PROP_TO_ATTR = {
'changed_by': ATTR_CHANGED_BY,
'code_format': ATTR_CODE_FORMAT,
}
@bind_hass
def is_locked(hass, entity_id=None):
"""Return if the lock is locked based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LOCKS
return hass.states.is_state(entity_id, STATE_LOCKED)
async def async_setup(hass, config):
"""Track states and offer events for locks."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_UNLOCK, LOCK_SERVICE_SCHEMA,
'async_unlock'
)
component.async_register_entity_service(
SERVICE_LOCK, LOCK_SERVICE_SCHEMA,
'async_lock'
)
component.async_register_entity_service(
SERVICE_OPEN, LOCK_SERVICE_SCHEMA,
'async_open'
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
class LockDevice(Entity):
"""Representation of a lock."""
@property
def changed_by(self):
"""Last change triggered by."""
return None
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def is_locked(self):
"""Return true if the lock is locked."""
return None
def lock(self, **kwargs):
"""Lock the lock."""
raise NotImplementedError()
def async_lock(self, **kwargs):
"""Lock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.lock, **kwargs))
def unlock(self, **kwargs):
"""Unlock the lock."""
raise NotImplementedError()
def async_unlock(self, **kwargs):
"""Unlock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.unlock, **kwargs))
def open(self, **kwargs):
"""Open the door latch."""
raise NotImplementedError()
def async_open(self, **kwargs):
"""Open the door latch.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.open, **kwargs))
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
state_attr[attr] = value
return state_attr
@property
def state(self):
"""Return the state."""
locked = self.is_locked
if locked is None:
return None
return STATE_LOCKED if locked else STATE_UNLOCKED
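# Illustrative sketch (not part of this component): a platform integration
# typically subclasses LockDevice, implements the synchronous lock/unlock
# methods plus the is_locked property, and inherits the state handling
# above. The class below is a hypothetical minimal example.
class _ExampleLock(LockDevice):
    """Minimal in-memory lock demonstrating the entity contract."""

    def __init__(self):
        self._locked = True

    @property
    def is_locked(self):
        """Return true if the lock is locked."""
        return self._locked

    def lock(self, **kwargs):
        """Lock the lock."""
        self._locked = True

    def unlock(self, **kwargs):
        """Unlock the lock."""
        self._locked = False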
|
PetePriority/home-assistant
|
homeassistant/components/lock/__init__.py
|
Python
|
apache-2.0
| 4,322
|
from __future__ import with_statement
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from flask_debugtoolbar.panels import DebugPanel
from flask_debugtoolbar.utils import format_fname
_ = lambda x: x
class ThreadTrackingHandler(logging.Handler):
def __init__(self):
if threading is None:
raise NotImplementedError("threading module is not available, \
the logging panel cannot be used without it")
logging.Handler.__init__(self)
self.records = {} # a dictionary that maps threads to log records
def emit(self, record):
self.get_records().append(record)
def get_records(self, thread=None):
"""
        Returns a list of records for the provided thread, or if none is
provided, returns a list for the current thread.
"""
if thread is None:
thread = threading.currentThread()
if thread not in self.records:
self.records[thread] = []
return self.records[thread]
def clear_records(self, thread=None):
if thread is None:
thread = threading.currentThread()
if thread in self.records:
del self.records[thread]
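# Illustrative usage sketch (comment only, not in the original module):
# because records are keyed by thread, concurrent requests each see only
# their own log output.
#
#   h = ThreadTrackingHandler()
#   logging.root.addHandler(h)
#   logging.warning('hello')   # stored under the current thread
#   h.get_records()            # -> [<LogRecord ...>]
#   h.clear_records()          # drop this thread's records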
handler = None
_init_lock = threading.Lock()
def _init_once():
global handler
if handler is not None:
return
with _init_lock:
if handler is not None:
return
# Call werkzeug's internal logging to make sure it gets configured
# before we add our handler. Otherwise werkzeug will see our handler
# and not configure console logging for the request log.
# Werkzeug's default log level is INFO so this message probably won't
# be seen.
try:
from werkzeug._internal import _log
except ImportError:
pass
else:
_log('debug', 'Initializing Flask-DebugToolbar log handler')
handler = ThreadTrackingHandler()
logging.root.addHandler(handler)
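# Editorial note: _init_once() above uses double-checked locking -- the
# cheap unlocked test avoids taking the lock on every request, while the
# second check inside the lock guards against two threads racing to
# install a second handler.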
class LoggingPanel(DebugPanel):
name = 'Logging'
has_content = True
def process_request(self, request):
_init_once()
handler.clear_records()
def get_and_delete(self):
records = handler.get_records()
handler.clear_records()
return records
def nav_title(self):
return _("Logging")
def nav_subtitle(self):
# FIXME l10n: use ngettext
num_records = len(handler.get_records())
return '%s message%s' % (num_records, '' if num_records == 1 else 's')
def title(self):
return _('Log Messages')
def url(self):
return ''
def content(self):
records = []
for record in self.get_and_delete():
records.append({
'message': record.getMessage(),
'time': datetime.datetime.fromtimestamp(record.created),
'level': record.levelname,
'file': format_fname(record.pathname),
'file_long': record.pathname,
'line': record.lineno,
})
context = self.context.copy()
context.update({'records': records})
return self.render('panels/logger.html', context)
|
morreene/tradenews
|
venv/Lib/site-packages/flask_debugtoolbar/panels/logger.py
|
Python
|
bsd-3-clause
| 3,262
|
import unittest
from unittest.mock import mock_open, patch
from pandas import Series, DataFrame
from pandas.util.testing import assert_series_equal, assert_frame_equal, assert_panel_equal
from fa.database import models
class PandasTestCase(unittest.TestCase):
""" For test case with assertion involving Pandas NDFrame objects """
def assertFrameEqual(self, first, second, msg=None, **kwargs):
""" Fail if the two NDFrame objects of the same type are unequal at any value
as determined by the '==' operator (two NaNs are treated as equal).
        It reuses assertion functions in pandas.util.testing internally, but
        produces a clearer failure message.
        Any extra keyword arguments are passed on to the assertion function.
"""
if isinstance(first, Series):
assert_func = assert_series_equal
elif isinstance(first, DataFrame):
assert_func = assert_frame_equal
else:
assert_func = assert_panel_equal
try:
assert_func(first, second, **kwargs)
except AssertionError:
raise self.failureException(msg or "\n{0}\n!=\n{1}".format(first, second))
class FileIOTestCase(unittest.TestCase):
""" For testing code involving file io operations """
def setUp(self):
""" patches the open function with a mock, to be undone after test. """
self.mo = mock_open()
patcher = patch("builtins.open", self.mo)
patcher.start()
self.addCleanup(patcher.stop)
def get_written_string(self):
return ''.join(c[0][0] for c in self.mo.return_value.write.call_args_list)
class DBTestCase(unittest.TestCase):
""" For testing code involving database io operations
Note: this class interacts with models module
"""
test_db_path = ":memory:"
@classmethod
def _cleanup(cls):
""" closes connection """
models.db.close()
@classmethod
def setUpClass(cls):
""" creates test database and all tables """
try:
models.db.init(cls.test_db_path)
models.db.execute_sql("PRAGMA foreign_keys = ON")
models.db.create_tables(models.export)
except:
cls._cleanup() # immediately performs cleanup if exception occurs
raise
@classmethod
def tearDownClass(cls):
""" does cleanup after all tests """
cls._cleanup()
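# Hypothetical usage sketch (not part of this module): a concrete test case
# builds on these base classes, e.g.
#
#   class MyFileTest(FileIOTestCase):
#       def test_write(self):
#           with open('out.txt', 'w') as f:  # 'open' is patched by mock_open
#               f.write('hello')
#           self.assertEqual(self.get_written_string(), 'hello')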
|
kakarukeys/algo-fa
|
tests/util.py
|
Python
|
gpl-2.0
| 2,457
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperbolic DQN agent."""
import collections
import math
from dopamine.agents.dqn import dqn_agent
import gin
import tensorflow.compat.v1 as tf
from hyperbolic_discount import agent_utils
from hyperbolic_discount.replay_memory import circular_replay_buffer
from tensorflow.contrib import slim
@gin.configurable
class HyperDQNAgent(dqn_agent.DQNAgent):
"""A compact implementation of a Hyperbolic DQN agent."""
def __init__(self,
sess,
num_actions,
observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
observation_dtype=dqn_agent.NATURE_DQN_DTYPE,
stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
number_of_gammas=8,
gamma_max=0.99,
acting_policy='hyperbolic',
hyp_exponent=1.0,
integral_estimate='lower',
update_horizon=1,
min_replay_history=20000,
update_period=4,
target_update_period=8000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.01,
epsilon_eval=0.001,
epsilon_decay_period=250000,
replay_scheme='uniform',
gradient_clipping_norm=None,
network_size_expansion=1.0,
tf_device='/cpu:*',
use_staging=True,
optimizer=tf.train.AdamOptimizer(
learning_rate=0.00025, epsilon=0.0003125),
summary_writer=None,
summary_writing_frequency=50000):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: `tf.Session`, for executing ops.
num_actions: int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: tf.DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to tf.float32.
stack_size: int, number of frames to use in state stack.
number_of_gammas: int, the number of gammas to estimate in parallel.
gamma_max: int, the maximum gammas we will learn via Bellman updates.
acting_policy: str, the policy with which the agent will act. One of
['hyperbolic', 'largest_gamma']
hyp_exponent: float, the parameter k in the equation 1. / (1. + k * t)
for hyperbolic discounting. Smaller parameter will lead to a longer
horizon.
integral_estimate: str, how to estimate the integral of the hyperbolic
discount.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_fn: function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon). This function should return the epsilon value
used for exploration during training.
epsilon_train: float, the value to which the agent's epsilon is eventually
decayed during training.
epsilon_eval: float, epsilon used when evaluating the agent.
epsilon_decay_period: int, length of the epsilon decay schedule.
replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
replay memory.
gradient_clipping_norm: str, if not None, this will set the gradient
clipping value.
network_size_expansion: float, the multiplier on the default layer size.
tf_device: str, Tensorflow device on which the agent's graph is executed.
use_staging: bool, when True use a staging area to prefetch the next
training batch, speeding training up by about 30%.
optimizer: `tf.train.Optimizer`, for training the value function.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
"""
self._replay_scheme = replay_scheme
self.optimizer = optimizer
self.number_of_gammas = number_of_gammas
self.gamma_max = gamma_max
self.acting_policy = acting_policy
self.hyp_exponent = hyp_exponent
self.integral_estimate = integral_estimate
self.gradient_clipping_norm = gradient_clipping_norm
self.network_size_expansion = network_size_expansion
# These are the discount factors (gammas) used to estimate the integral.
self.eval_gammas = agent_utils.compute_eval_gamma_interval(
self.gamma_max, self.hyp_exponent, self.number_of_gammas)
# However, if we wish to estimate hyperbolic discounting with the form,
#
# \Gamma_t = 1. / (1. + k * t)
#
# where we now have a coefficient k <= 1.0
    # we need to consider the value functions for \gamma ^ k. We refer to
# these below as self.gammas, since these are the gammas actually being
# learned via Bellman updates.
self.gammas = [
math.pow(gamma, self.hyp_exponent) for gamma in self.eval_gammas
]
assert max(self.gammas) <= self.gamma_max
super(HyperDQNAgent, self).__init__(
sess=sess,
num_actions=num_actions,
observation_shape=observation_shape,
observation_dtype=observation_dtype,
stack_size=stack_size,
gamma=0, # TODO(liamfedus): better way to deal with self.gamma
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
tf_device=tf_device,
use_staging=use_staging,
optimizer=self.optimizer,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency)
def _get_network_type(self):
"""Returns the type of the outputs of a value distribution network.
The hyperbolic Q-values are estimated via a Riemann sum to approximate the
integral. This builds a lower or an upper estimate of the integral via a
set of rectangles.
Returns:
net_type: _network_type object defining the outputs of the network.
"""
return collections.namedtuple('hyper_dqn_network',
['hyp_q_value', 'q_values'])
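  # Worked intuition (comment only, restating the docstring above): the
  # per-gamma Q-value heads are combined by agent_utils.integrate_q_values
  # as a Riemann sum over the discount axis -- 'lower' uses left-rectangle
  # heights and 'upper' right-rectangle heights when approximating the
  # hyperbolic integral.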
def _network_template(self, state):
"""Builds a convolutional network that outputs Q-value distributions.
Args:
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = slim.conv2d(
net, int(32 * self.network_size_expansion), [8, 8], stride=4)
net = slim.conv2d(
net, int(64 * self.network_size_expansion), [4, 4], stride=2)
net = slim.conv2d(
net, int(64 * self.network_size_expansion), [3, 3], stride=1)
net = slim.flatten(net)
net = slim.fully_connected(net, int(512 * self.network_size_expansion))
q_values = []
for _ in range(self.number_of_gammas):
gamma_q_value = slim.fully_connected(
net, self.num_actions, activation_fn=None)
q_values.append(gamma_q_value)
# Estimate the hyperbolic discounted q-values
hyp_q_value = agent_utils.integrate_q_values(q_values,
self.integral_estimate,
self.eval_gammas,
self.number_of_gammas,
self.gammas)
return self._get_network_type()(hyp_q_value, q_values)
def _build_networks(self):
"""Builds the Q-value network computations needed for acting and training.
These are:
self.online_convnet: For computing the current state's Q-values.
self.target_convnet: For computing the next state's target Q-values.
self._net_outputs: The actual Q-values.
self._q_argmax: The action maximizing the current state's Q-values.
self._replay_net_outputs: The replayed states' Q-values.
self._replay_next_target_net_outputs: The replayed next states' target
Q-values (see Mnih et al., 2015 for details).
"""
    # Calling online_convnet will generate a new graph as defined in
    # self._network_template using whatever input is passed, but will always
# share the same weights.
self.online_convnet = tf.make_template('Online', self._network_template)
self.target_convnet = tf.make_template('Target', self._network_template)
self._net_outputs = self.online_convnet(self.state_ph)
self._replay_net_outputs = self.online_convnet(self._replay.states)
self._replay_next_target_net_outputs = self.target_convnet(
self._replay.next_states)
if self.acting_policy == 'hyperbolic':
self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]
elif self.acting_policy == 'largest_gamma':
self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]
else:
raise NotImplementedError
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent.
Args:
use_staging: bool, if True, uses a staging area to prefetch data for
faster training.
Returns:
A WrapperReplayBuffer object.
"""
return circular_replay_buffer.WrappedReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
observation_dtype=self.observation_dtype.as_numpy_dtype)
def _build_discounted_n_step_rewards(self, gamma):
"""Compute the discounted n-step return."""
discounts = [gamma**i for i in range(self.update_horizon)]
discount_tensor = tf.constant(discounts)
return tf.reduce_sum(self._replay.rewards * discount_tensor, axis=1)
def _build_target_q_op(self):
"""Build an op used as a target for the Q-value.
Returns:
target_q_op: An op calculating the Q-value.
"""
targets = []
for gamma, target_q in zip(self.gammas,
self._replay_next_target_net_outputs.q_values):
# Get the maximum Q-value across the actions dimension.
replay_next_qt_max = tf.reduce_max(target_q, 1)
# Calculate the Bellman target value.
# Q_t = R_t + \gamma^N * Q'_t+1
# where,
      #         Q'_t+1 = \max_a Q(S_t+1, a)
# (or) 0 if S_t is a terminal state,
# and
# N is the update horizon (by default, N=1).
cumulative_gamma = math.pow(gamma, self.update_horizon)
n_step_reward = self._build_discounted_n_step_rewards(gamma)
targets.append(n_step_reward + cumulative_gamma * replay_next_qt_max *
(1. - tf.cast(self._replay.terminals, tf.float32)))
return targets
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training from replay data.
"""
replay_action_one_hot = tf.one_hot(
self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')
replay_chosen_qs = []
for i in range(len(self.gammas)):
replay_chosen_q = tf.reduce_sum(
self._replay_net_outputs.q_values[i] * replay_action_one_hot,
reduction_indices=1,
name='replay_chosen_q_{}'.format(i))
replay_chosen_qs.append(replay_chosen_q)
targets = self._build_target_q_op()
loss = 0.
for i, (target,
replay_chosen_q) in enumerate(zip(targets, replay_chosen_qs)):
gamma_loss = tf.losses.huber_loss(
tf.stop_gradient(target),
replay_chosen_q,
reduction=tf.losses.Reduction.NONE)
loss += gamma_loss
if self.summary_writer is not None:
tf.summary.scalar('Losses/GammaLoss_{}'.format(i),
tf.reduce_mean(gamma_loss))
# Divide by the number of gammas to preserve scale.
loss = loss / self.number_of_gammas
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('HuberLoss', tf.reduce_mean(loss))
def clip_if_not_none(grad, clip_norm=5.):
"""Clip the gradient only if not None."""
if grad is None:
return grad
return tf.clip_by_norm(grad, clip_norm)
if self.gradient_clipping_norm is not None:
# Clip gradients to test stability.
grads_and_vars = self.optimizer.compute_gradients(tf.reduce_mean(loss))
clipped_gradients = [
(clip_if_not_none(grad, clip_norm=self.gradient_clipping_norm), var)
for grad, var in grads_and_vars
]
return self.optimizer.apply_gradients(clipped_gradients)
else:
return self.optimizer.minimize(tf.reduce_mean(loss))
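# Worked example (illustrative, not part of the agent): the n-step discount
# weights built in _build_discounted_n_step_rewards are gamma**i for
# i in range(update_horizon); e.g. update_horizon=3 with gamma=0.9 gives
# [1.0, 0.9, 0.81], so rewards [r0, r1, r2] contribute
# r0 + 0.9 * r1 + 0.81 * r2 for each replay element.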
|
google-research/google-research
|
hyperbolic_discount/hyperbolic_dqn_agent.py
|
Python
|
apache-2.0
| 13,894
|
import _plotly_utils.basevalidators
class XperiodalignmentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xperiodalignment", parent_name="heatmap", **kwargs):
super(XperiodalignmentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"xtype": "scaled"}),
values=kwargs.pop("values", ["start", "middle", "end"]),
**kwargs
)
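# Illustrative note (editorial assumption, not from the original file):
# EnumeratedValidator restricts the property to the listed values, so a
# value like "middle" is coerced through unchanged while an unknown
# alignment raises a ValueError during figure validation.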
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/heatmap/_xperiodalignment.py
|
Python
|
mit
| 572
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from .components import aes, assign_visual_mapping
from .components import colors, shapes
from .components.legend import draw_legend
from .geoms import *
from .scales import *
from .themes.theme_gray import _set_default_theme_rcparams
from .themes.theme_gray import _theme_grey_post_plot_callback
# 'utils' is referenced below (calc_axis_breaks_and_limits) but never
# imported by name; import the sibling utils module explicitly (it is
# already used conditionally for _rc_context further down).
from . import utils
import six
__ALL__ = ["ggplot"]
import sys
import warnings
from copy import deepcopy
# Show plots if in interactive mode
if sys.flags.interactive:
plt.ion()
# Workaround for matplotlib 1.1.1 not having a rc_context
if not hasattr(mpl, 'rc_context'):
from .utils import _rc_context
mpl.rc_context = _rc_context
class ggplot(object):
"""
ggplot is the base layer or object that you use to define
the components of your chart (x and y axis, shapes, colors, etc.).
You can combine it with layers (or geoms) to make complex graphics
with minimal effort.
Parameters
-----------
aesthetics : aes (ggplot.components.aes.aes)
aesthetics of your plot
data : pandas DataFrame (pd.DataFrame)
a DataFrame with the data you want to plot
Examples
----------
>>> p = ggplot(aes(x='x', y='y'), data=diamonds)
>>> print(p + geom_point())
"""
CONTINUOUS = ['x', 'y', 'size', 'alpha']
DISCRETE = ['color', 'shape', 'marker', 'alpha', 'linestyle']
def __init__(self, aesthetics, data):
# ggplot should just 'figure out' which is which
if not isinstance(data, pd.DataFrame):
aesthetics, data = data, aesthetics
self.aesthetics = aesthetics
self.data = _apply_transforms(data, self.aesthetics)
# defaults
self.geoms = []
self.n_wide = 1
self.n_high = 1
self.n_dim_x = None
self.n_dim_y = None
# facets
self.facets = []
self.facet_type = None
self.facet_scales = None
self.facet_pairs = [] # used by facet_grid
# components
self.title = None
self.xlab = None
self.ylab = None
# format for x/y major ticks
self.xtick_formatter = None
self.xbreaks = None
self.xtick_labels = None
self.xmajor_locator = None
self.xminor_locator = None
self.ytick_formatter = None
self.xlimits = None
self.ylimits = None
self.ytick_labels = None
self.scale_y_reverse = None
self.scale_x_reverse = None
self.scale_y_log = None
self.scale_x_log = None
# legend is a dictionary of {legend_type: {visual_value: legend_key}},
# where legend_type is one of "color", "linestyle", "marker", "size";
# visual_value is color value, line style, marker character, or size
# value; and legend_key is a quantile.
self.legend = {}
        # Theme related options
        # this must be set by any theme to prevent adding the default theme
self.theme_applied = False
self.rcParams = {}
# Callbacks to change aspects of each axis
self.post_plot_callbacks = []
# continuous color configs
self.color_scale = None
self.colormap = plt.cm.Blues
self.manual_color_list = None
def __repr__(self):
"""Print/show the plot"""
figure = self.draw()
# We're going to default to making the plot appear when __repr__ is
# called.
#figure.show() # doesn't work in ipython notebook
plt.show()
# TODO: We can probably get more sugary with this
return "<ggplot: (%d)>" % self.__hash__()
def __deepcopy__(self, memo):
'''deepcopy support for ggplot'''
# This is a workaround as ggplot(None, None) does not really work :-(
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
for key, item in self.__dict__.items():
# don't make a deepcopy of data!
if key == "data":
result.__dict__[key] = self.__dict__[key]
continue
result.__dict__[key] = deepcopy(self.__dict__[key], memo)
return result
def draw(self):
        # Adding rc=self.rcParams does not validate/parse the params, which
        # then throws an error during plotting!
with mpl.rc_context():
if not self.theme_applied:
_set_default_theme_rcparams(mpl)
# will be empty if no theme was applied
for key in six.iterkeys(self.rcParams):
val = self.rcParams[key]
# there is a bug in matplotlib which does not allow None directly
# https://github.com/matplotlib/matplotlib/issues/2543
try:
if key == 'text.dvipnghack' and val is None:
val = "none"
mpl.rcParams[key] = val
except Exception as e:
msg = """Setting "mpl.rcParams['%s']=%s" raised an Exception: %s""" % (key, str(val), str(e))
warnings.warn(msg, RuntimeWarning)
            # draw is not allowed to show a plot, so we can use the result for ggsave
# This sets a rcparam, so we don't have to undo it after plotting
mpl.interactive(False)
if self.facet_type == "grid" and len(self.facets) > 1:
fig, axs = plt.subplots(self.n_high, self.n_wide,
sharex=True, sharey=True)
plt.subplots_adjust(wspace=.05, hspace=.05)
elif self.facet_type == "wrap" or len(self.facets)==1:
# add (more than) the needed number of subplots
fig, axs = plt.subplots(self.n_high, self.n_wide)
# there are some extra, remove the plots
subplots_available = self.n_wide * self.n_high
if self.n_dim_x:
extra_subplots = subplots_available - self.n_dim_x
else:
extra_subplots = 0
for extra_plot in axs.flatten()[-extra_subplots:]:
extra_plot.axis('off')
# plots is a mapping from xth-plot -> subplot position
plots = []
for x in range(self.n_wide):
for y in range(self.n_high):
plots.append((x, y))
plots = sorted(plots, key=lambda x: x[1] + x[0] * self.n_high + 1)
else:
fig, axs = plt.subplots(self.n_high, self.n_wide)
axs = np.atleast_2d(axs)
# Set the default plot to the first one
plt.subplot(self.n_wide, self.n_high, 1)
# Aes need to be initialized BEFORE we start faceting. This is b/c
# we want to have a consistent aes mapping across facets.
self.data = assign_visual_mapping(self.data, self.aesthetics, self)
# Faceting just means doing an additional groupby. The
# dimensions of the plot remain the same
if self.facets:
# geom_bar does not work with faceting yet
            _check_geom_bar = lambda x: isinstance(x, geom_bar)
if any(map(_check_geom_bar, self.geoms)):
msg = """Facetting is currently not supported with geom_bar. See
https://github.com/yhat/ggplot/issues/196 for more information"""
warnings.warn(msg, RuntimeWarning)
# the current subplot in the axs and plots
cntr = 0
            # first grids: faceting with two variables and defined positions
if len(self.facets) == 2 and self.facet_type != "wrap":
# store the extreme x and y coordinates of each pair of axes
axis_extremes = np.zeros(shape=(self.n_high * self.n_wide, 4))
xlab_offset = .15
for _iter, (facets, frame) in enumerate(self.data.groupby(self.facets)):
pos = self.facet_pairs.index(facets) + 1
plt.subplot(self.n_wide, self.n_high, pos)
for layer in self._get_layers(frame):
for geom in self.geoms:
callbacks = geom.plot_layer(layer)
axis_extremes[_iter] = [min(plt.xlim()), max(plt.xlim()),
min(plt.ylim()), max(plt.ylim())]
# find the grid wide data extremeties
xlab_min, ylab_min = np.min(axis_extremes, axis=0)[[0, 2]]
xlab_max, ylab_max = np.max(axis_extremes, axis=0)[[1, 3]]
# position of vertical labels for facet grid
xlab_pos = xlab_max + xlab_offset
ylab_pos = ylab_max - float(ylab_max - ylab_min) / 2
# This needs to enumerate all possibilities
for pos, facets in enumerate(self.facet_pairs):
pos += 1
# Plot the top and right boxes
if pos <= self.n_high: # first row
plt.subplot(self.n_wide, self.n_high, pos)
plt.table(cellText=[[facets[1]]], loc='top',
cellLoc='center', cellColours=[['lightgrey']])
if (pos % self.n_high) == 0: # last plot in a row
plt.subplot(self.n_wide, self.n_high, pos)
x = max(plt.xticks()[0])
y = max(plt.yticks()[0])
ax = axs[pos % self.n_high][pos % self.n_wide]
ax = plt.gca()
ax.text(1, 0.5, facets[0],
bbox=dict(
facecolor='lightgrey',
edgecolor='black',
color='black',
width=mpl.rcParams['font.size'] * 1.65
),
transform=ax.transAxes,
fontdict=dict(rotation=-90, verticalalignment="center", horizontalalignment='left')
)
plt.subplot(self.n_wide, self.n_high, pos)
# Handle the different scale types here
# (free|free_y|free_x|None) and also make sure that only the
# left column gets y scales and the bottom row gets x scales
scale_facet_grid(self.n_wide, self.n_high,
self.facet_pairs, self.facet_scales)
else: # now facet_wrap > 2 or facet_grid w/ only 1 facet
for facet, frame in self.data.groupby(self.facets):
for layer in self._get_layers(frame):
for geom in self.geoms:
if self.facet_type == "wrap" or 1==1:
if cntr + 1 > len(plots):
continue
pos = plots[cntr]
if pos is None:
continue
y_i, x_i = pos
pos = x_i + y_i * self.n_high + 1
ax = plt.subplot(self.n_wide, self.n_high, pos)
else:
ax = plt.subplot(self.n_wide, self.n_high, cntr)
# TODO: this needs some work
if (cntr % self.n_high) == -1:
plt.tick_params(axis='y', which='both',
bottom='off', top='off',
labelbottom='off')
callbacks = geom.plot_layer(layer)
if callbacks:
for callback in callbacks:
fn = getattr(ax, callback['function'])
fn(*callback['args'])
title = facet
if isinstance(facet, tuple):
title = ", ".join(facet)
plt.table(cellText=[[title]], loc='top',
cellLoc='center', cellColours=[['lightgrey']])
cntr += 1
# NOTE: Passing n_high for cols (instead of n_wide) and
# n_wide for rows because in all previous calls to
# plt.subplot, n_wide is passed as the number of rows, not
# columns.
scale_facet_wrap(self.n_wide, self.n_high, range(cntr), self.facet_scales)
else: # no faceting
for geom in self.geoms:
_aes = self.aesthetics
if geom.aes:
# update the default mapping with the geom specific one
_aes = _aes.copy()
_aes.update(geom.aes)
if not geom.data is None:
data = _apply_transforms(geom.data, _aes)
data = assign_visual_mapping(data, _aes, self)
else:
data = self.data
for layer in self._get_layers(data, _aes):
ax = plt.subplot(1, 1, 1)
callbacks = geom.plot_layer(layer)
if callbacks:
for callback in callbacks:
fn = getattr(ax, callback['function'])
fn(*callback['args'])
        # Handling the details of the chart here; there is probably a better
        # way to do this...
if self.title:
if self.facets:
                # This is currently similar to what plt.title uses
plt.gcf().suptitle(self.title, verticalalignment='baseline',
fontsize=mpl.rcParams['axes.titlesize'])
else:
plt.title(self.title)
if self.xlab:
if self.facet_type == "grid":
fig.text(0.5, 0.025, self.xlab)
else:
for ax in plt.gcf().axes:
ax.set_xlabel(self.xlab)
if self.ylab:
if self.facet_type == "grid":
fig.text(0.025, 0.5, self.ylab, rotation='vertical')
else:
for ax in plt.gcf().axes:
ax.set_ylabel(self.ylab)
        # in case of faceting, this should be applied to all axes!
for ax in plt.gcf().axes:
if self.xmajor_locator:
ax.xaxis.set_major_locator(self.xmajor_locator)
if self.xtick_formatter:
ax.xaxis.set_major_formatter(self.xtick_formatter)
fig.autofmt_xdate()
if self.xbreaks: # xbreaks is a list manually provided
ax.xaxis.set_ticks(self.xbreaks)
if self.xtick_labels:
if isinstance(self.xtick_labels, dict):
labs = []
for lab in plt.xticks()[1]:
lab = lab.get_text()
lab = self.xtick_labels.get(lab)
labs.append(lab)
ax.xaxis.set_ticklabels(labs)
elif isinstance(self.xtick_labels, list):
ax.xaxis.set_ticklabels(self.xtick_labels)
if self.ytick_labels:
if isinstance(self.ytick_labels, dict):
labs = []
for lab in plt.yticks()[1]:
lab = lab.get_text()
lab = self.ytick_labels.get(lab)
labs.append(lab)
ax.yaxis.set_ticklabels(labs)
elif isinstance(self.ytick_labels, list):
ax.yaxis.set_ticklabels(self.ytick_labels)
if self.ytick_formatter:
ax.yaxis.set_major_formatter(self.ytick_formatter)
if self.xlimits:
if not self.xbreaks and not self.xtick_labels:
                    labs, minval, maxval = utils.calc_axis_breaks_and_limits(self.xlimits[0], self.xlimits[1])
ax.xaxis.set_ticks(labs)
ax.xaxis.set_ticklabels(labs)
ax.set_xlim(self.xlimits)
if self.ylimits:
if not self.ytick_labels:
                    labs, minval, maxval = utils.calc_axis_breaks_and_limits(self.ylimits[0], self.ylimits[1])
ax.yaxis.set_ticks(labs)
ax.yaxis.set_ticklabels(labs)
ax.set_ylim(self.ylimits)
if self.scale_y_reverse:
ax.invert_yaxis()
if self.scale_x_reverse:
ax.invert_xaxis()
if self.scale_y_log:
ax.set_yscale('log', basey=self.scale_y_log)
if self.scale_x_log:
ax.set_xscale('log', basex=self.scale_x_log)
# TODO: Having some issues here with things that shouldn't have a
        # legend or at least shouldn't get shrunk to accommodate one. Need
# some sort of test in place to prevent this OR prevent legend
# getting set to True.
if self.legend:
# works with faceted and non-faceted plots
ax = axs[0][self.n_wide - 1]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
cntr = 0
            # py3 and py2 have different sorting order in dicts, so make that consistent
for ltype in sorted(self.legend.keys()):
legend = self.legend[ltype]
lname = self.aesthetics.get(ltype, ltype)
new_legend = draw_legend(ax, legend, ltype, lname, cntr)
ax.add_artist(new_legend)
cntr += 1
        # Finally apply any post plot callbacks (theming, etc)
if self.theme_applied:
for ax in plt.gcf().axes:
self._apply_post_plot_callbacks(ax)
else:
for ax in plt.gcf().axes:
_theme_grey_post_plot_callback(ax)
return plt.gcf()
def _get_layers(self, data=None, aes=None):
"""Get a layer to be plotted."""
        # Use the default data and aesthetics in case no specific ones are supplied
if data is None:
data = self.data
if aes is None:
aes = self.aesthetics
mapping = {}
extra = {}
for ae, key in aes.items():
if isinstance(key, list) or hasattr(key, "__array__"):
# direct assignment of a list/array to the aes -> it's done in the get_layer step
mapping[ae] = key
elif key in data:
# a column or a transformed column
mapping[ae] = data[key]
else:
# now we have a single value. ggplot2 treats that as if all rows should be this
                # value, so let's do the same. To ensure that all rows get this value, we have to
# do that after we constructed the dataframe.
# See also the _apply_transform function below, which does this already for
# string values.
extra[ae] = key
mapping = pd.DataFrame(mapping)
for ae, key in extra.items():
mapping[ae] = key
        # Overwrite the already-computed mappings with matplotlib-understandable
        # values for color/size/etc
if "color" in mapping:
mapping['color'] = data['color_mapping']
if "size" in mapping:
mapping['size'] = data['size_mapping']
if "shape" in mapping:
mapping['shape'] = data['shape_mapping']
if "linestyle" in mapping:
mapping['linestyle'] = data['linestyle_mapping']
# Default the x and y axis labels to the name of the column
if "x" in aes and self.xlab is None:
self.xlab = aes['x']
if "y" in aes and self.ylab is None:
self.ylab = aes['y']
# Automatically drop any row that has an NA value
mapping = mapping.dropna()
discrete_aes = [ae for ae in self.DISCRETE if ae in mapping]
        # TODO: I think this information should better be passed in to plot_layer() and should be based on whether the variable is a factor or not
# -> Use dtypes = object/string or in case we use a proper "factor" function -> compute the levels over the whole dataframe in case of faceting!
# TODO: It would be nice if the plot_layer() methods could get a dataframe in case some munging is required
# TODO: maybe change this to pass in the complete dataframe for the layer and let the plot_layer function work out that it has to plot each series differently.
layers = []
if len(discrete_aes) == 0:
frame = mapping.to_dict('list')
layers.append(frame)
else:
for name, frame in mapping.groupby(discrete_aes):
frame = frame.to_dict('list')
for ae in self.DISCRETE:
if ae in frame:
frame[ae] = frame[ae][0]
layers.append(frame)
return layers
def add_to_legend(self, legend_type, legend_dict, scale_type="discrete"):
"""Adds the the specified legend to the legend
Parameters
----------
legend_type : str
type of legend, one of "color", "linestyle", "marker", "size"
legend_dict : dict
a dictionary of {visual_value: legend_key} where visual_value
is color value, line style, marker character, or size value;
and legend_key is a quantile.
scale_type : str
either "discrete" (default) or "continuous"; usually only color
needs to specify which kind of legend should be drawn, all
other scales will get a discrete scale.
"""
        # scale_type is currently unused
# TODO: what happens if we add a second color mapping?
# Currently the color mapping in the legend is overwritten.
# What does ggplot do in such a case?
if legend_type in self.legend:
pass
#msg = "Adding a secondary mapping of {0} is unsupported and no legend for this mapping is added.\n"
#sys.stderr.write(msg.format(str(legend_type)))
self.legend[legend_type] = legend_dict
def _apply_post_plot_callbacks(self, axis):
for cb in self.post_plot_callbacks:
cb(axis)
def _is_identity(x):
if x in colors.COLORS:
return True
elif x in shapes.SHAPES:
return True
elif isinstance(x, (float, int)):
return True
else:
return False
def _apply_transforms(data, aes):
"""Adds columns from the aes included transformations
Possible transformations are "factor(<col>)" and
expressions which can be used with eval.
Parameters
----------
data : DataFrame
the original dataframe
aes : aesthetics
the aesthetic
Returns
-------
    data : DataFrame
        Transformed DataFrame
"""
data = data.copy()
for ae, name in aes.items():
if (isinstance(name, six.string_types) and (name not in data)):
            # here we assume that it is a transformation
            # if the mapping is to a single value (color="red"), this is handled by pandas and
            # assigned to the whole index. See also the last case of the mapping building in _get_layers!
from patsy.eval import EvalEnvironment
def factor(s, levels=None, labels=None):
                # TODO: This factor implementation needs improvements...
                # probably only going to happen after https://github.com/pydata/pandas/issues/5313
                # is implemented in pandas ...
if levels or labels:
print("factor levels or labels are not yet implemented.")
return s.apply(str)
            # use either the captured eval_env from aes or the env one step up
env = EvalEnvironment.capture(eval_env=(aes.__eval_env__ or 1))
# add factor as a special case
env.add_outer_namespace({"factor":factor})
try:
new_val = env.eval(name, inner_namespace=data)
except Exception as e:
msg = "Could not evaluate the '%s' mapping: '%s' (original error: %s)"
raise Exception(msg % (ae, name, str(e)))
try:
data[name] = new_val
except Exception as e:
msg = """The '%s' mapping: '%s' produced a value of type '%s', but only single items
and lists/arrays can be used. (original error: %s)"""
raise Exception(msg % (ae, name, str(type(_new_val)), str(e)))
return data
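# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The comment in _get_layers notes that a single aes value such as
# color="red" is broadcast to every row by assigning it after the DataFrame
# has been constructed. This standalone demo shows that pandas behavior:
def _demo_single_value_broadcast():
    mapping = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    mapping["color"] = "red"  # scalar assignment fills every row
    assert (mapping["color"] == "red").all()
    return mapping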
|
eco32i/ggplot
|
ggplot/ggplot.py
|
Python
|
bsd-2-clause
| 25,992
|
def extractSimonwesterosBlogspotCom(item):
'''
Parser for 'simonwesteros.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSimonwesterosBlogspotCom.py
|
Python
|
bsd-3-clause
| 564
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.utils import update_module_attributes
from ._theme import override_current_theme_class
__all__ = [
"override_current_theme_class",
]
update_module_attributes(__all__, __name__)
|
shoopio/shoop
|
shuup/xtheme/testing.py
|
Python
|
agpl-3.0
| 425
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def _gap_insertion_sort(a_list, start, gap):
for i in range(start + gap, len(a_list), gap):
current_value = a_list[i]
position = i
while (position >= gap) and (a_list[position - gap] > current_value):
a_list[position] = a_list[position - gap]
position = position - gap
a_list[position] = current_value
def shell_sort(a_list):
"""Shell Sort algortihm.
Time complexity: O(n^2).
"""
sublist_count = len(a_list) // 2
while sublist_count > 0:
for start_pos in range(sublist_count):
_gap_insertion_sort(a_list, start_pos, sublist_count)
print('After increments of size {0}:\n{1}'
.format(sublist_count, a_list))
sublist_count = sublist_count // 2
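# Hedged variant sketch (added for illustration; not part of the original
# file): the same routine driven by Knuth's gap sequence 1, 4, 13, 40, ...
# (h = 3h + 1), which improves the worst case to O(n^(3/2)).
def shell_sort_knuth(a_list):
    gap = 1
    while gap < len(a_list) // 3:
        gap = 3 * gap + 1
    while gap > 0:
        for start_pos in range(gap):
            _gap_insertion_sort(a_list, start_pos, gap)
        gap //= 3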
def main():
a_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print('a_list: {}'.format(a_list))
print('By Shell Sort: ')
shell_sort(a_list)
if __name__ == '__main__':
main()
|
bowen0701/algorithms_data_structures
|
alg_shell_sort.py
|
Python
|
bsd-2-clause
| 1,092
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.paystub_pay_frequency import PaystubPayFrequency
globals()['PaystubPayFrequency'] = PaystubPayFrequency
class PaystubDetails(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'pay_period_start_date': (date, none_type,), # noqa: E501
'pay_period_end_date': (date, none_type,), # noqa: E501
'pay_date': (date, none_type,), # noqa: E501
'paystub_provider': (str, none_type,), # noqa: E501
'pay_frequency': (PaystubPayFrequency,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'pay_period_start_date': 'pay_period_start_date', # noqa: E501
'pay_period_end_date': 'pay_period_end_date', # noqa: E501
'pay_date': 'pay_date', # noqa: E501
'paystub_provider': 'paystub_provider', # noqa: E501
'pay_frequency': 'pay_frequency', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PaystubDetails - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            pay_period_start_date (date, none_type): Beginning date of the pay period on the paystub in the 'YYYY-MM-DD' format. [optional]  # noqa: E501
            pay_period_end_date (date, none_type): Ending date of the pay period on the paystub in the 'YYYY-MM-DD' format. [optional]  # noqa: E501
            pay_date (date, none_type): Pay date on the paystub in the 'YYYY-MM-DD' format. [optional]  # noqa: E501
paystub_provider (str, none_type): The name of the payroll provider that generated the paystub, e.g. ADP. [optional] # noqa: E501
pay_frequency (PaystubPayFrequency): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
plaid/plaid-python
|
plaid/model/paystub_details.py
|
Python
|
mit
| 8,019
|
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from kmip.core import enums
from kmip.demos import utils
from kmip.pie import client
if __name__ == '__main__':
logger = utils.build_console_logger(logging.INFO)
# Build and parse arguments
parser = utils.build_cli_parser(enums.Operation.DESTROY)
opts, args = parser.parse_args(sys.argv[1:])
config = opts.config
uid = opts.uuid
# Exit early if the UUID is not specified
if uid is None:
logger.error('No UUID provided, exiting early from demo')
sys.exit()
# Build the client and connect to the server
with client.ProxyKmipClient(
config=config,
config_file=opts.config_file
) as client:
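        # NOTE (added comment): `as client` shadows the imported `client`
        # module from here on; that is safe only because the module is not
        # referenced again inside this block.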
try:
client.destroy(uid)
logger.info("Successfully destroyed secret with ID: {0}".format(
uid))
except Exception as e:
logger.error(e)
|
OpenKMIP/PyKMIP
|
kmip/demos/pie/destroy.py
|
Python
|
apache-2.0
| 1,553
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import scipy
import random
from gnuradio import gr, gr_unittest
import blocks_swig as blocks
import digital_swig as digital
import channels_swig as channels
from ofdm_txrx import ofdm_tx, ofdm_rx
from utils import tagged_streams
# Set this to true if you need to write out data
LOG_DEBUG_INFO=False
class ofdm_tx_fg (gr.top_block):
def __init__(self, data, len_tag_key):
gr.top_block.__init__(self, "ofdm_tx")
tx_data, tags = tagged_streams.packets_to_vectors((data,), len_tag_key)
src = blocks.vector_source_b(data, False, 1, tags)
self.tx = ofdm_tx(packet_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
self.sink = blocks.vector_sink_c()
self.connect(src, self.tx, self.sink)
def get_tx_samples(self):
return self.sink.data()
class ofdm_rx_fg (gr.top_block):
def __init__(self, samples, len_tag_key, channel=None, prepend_zeros=100):
gr.top_block.__init__(self, "ofdm_rx")
if prepend_zeros:
samples = (0,) * prepend_zeros + tuple(samples)
src = blocks.vector_source_c(tuple(samples) + (0,) * 1000)
self.rx = ofdm_rx(frame_length_tag_key=len_tag_key, debug_log=LOG_DEBUG_INFO)
if channel is not None:
self.connect(src, channel, self.rx)
else:
self.connect(src, self.rx)
self.sink = blocks.vector_sink_b()
self.connect(self.rx, self.sink)
def get_rx_bytes(self):
return self.sink.data()
class test_ofdm_txrx (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_tx (self):
""" Just make sure the Tx works in general """
len_tag_key = 'frame_len'
n_bytes = 52
n_samples_expected = (numpy.ceil(1.0 * (n_bytes + 4) / 6) + 3) * 80
test_data = [random.randint(0, 255) for x in range(n_bytes)]
tx_data, tags = tagged_streams.packets_to_vectors((test_data,), len_tag_key)
src = blocks.vector_source_b(test_data, False, 1, tags)
tx = ofdm_tx(packet_length_tag_key=len_tag_key)
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
self.assertEqual(len(tx_fg.get_tx_samples()), n_samples_expected)
def test_002_rx_only_noise(self):
""" Run the RX with only noise, check it doesn't crash
or return a burst. """
len_tag_key = 'frame_len'
samples = (0,) * 1000
channel = channels.channel_model(0.1)
rx_fg = ofdm_rx_fg(samples, len_tag_key, channel)
rx_fg.run()
self.assertEqual(len(rx_fg.get_rx_bytes()), 0)
def test_003_tx1packet(self):
""" Transmit one packet, with slight AWGN and slight frequency + timing offset.
Check packet is received and no bit errors have occurred. """
len_tag_key = 'frame_len'
n_bytes = 21
fft_len = 64
test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier, a fine freq offset stays below that
freq_offset = 1.0 / fft_len * 0.7
#channel = channels.channel_model(0.01, freq_offset)
channel = None
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(tuple(tx_fg.tx.sync_word1), tuple(rx_fg.rx.sync_word1))
self.assertEqual(tuple(tx_fg.tx.sync_word2), tuple(rx_fg.rx.sync_word2))
self.assertEqual(test_data, rx_data)
def test_004_tx1packet_large_fO(self):
""" Transmit one packet, with slight AWGN and large frequency offset.
Check packet is received and no bit errors have occurred. """
fft_len = 64
len_tag_key = 'frame_len'
n_bytes = 21
test_data = tuple([random.randint(0, 255) for x in range(n_bytes)])
#test_data = tuple([255 for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier
frequency_offset = 1.0 / fft_len * 2.5
channel = channels.channel_model(0.00001, frequency_offset)
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(test_data, rx_data)
if __name__ == '__main__':
gr_unittest.run(test_ofdm_txrx, "test_ofdm_txrx.xml")
|
Gabotero/GNURadioNext
|
gr-digital/python/qa_ofdm_txrx.py
|
Python
|
gpl-3.0
| 5,471
|
"""
MB85RC04 driver code is placed under the BSD license.
Copyright (c) 2014, Emlid Limited, www.emlid.com
Written by Vladislav Zakharov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Emlid Limited nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL EMLID LIMITED BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from smbus import SMBus
class MB85RC04:
def __init__(self, I2C_bus_number = 1, address = 0x50):
self.bus = SMBus(I2C_bus_number)
self.address = address
def readByte(self, registerAddress):
if(registerAddress > 255):
self.address = self.address | 1
registerAddress = registerAddress - 256
else:
self.address = self.address & 0xFE
return self.bus.read_byte_data(self.address, registerAddress)
def writeByte(self, registerAddress, data):
if(registerAddress > 255):
self.address = self.address | 1
registerAddress = registerAddress - 256
else:
self.address = self.address & 0xFE
self.bus.write_byte_data(self.address, registerAddress, data)
def readBytes(self, registerAddress):
if(registerAddress > 255):
self.address = self.address | 1
registerAddress = registerAddress - 256
else:
self.address = self.address & 0xFE
return self.bus.read_i2c_block_data(self.address, registerAddress)
def writeBytes(self, registerAddress, data):
if(registerAddress > 255):
self.address = self.address | 1
registerAddress = registerAddress - 256
else:
self.address = self.address & 0xFE
self.bus.write_i2c_block_data(self.address, registerAddress, data)
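# Hedged usage sketch (added for illustration; not part of the original
# driver). The MB85RC04 is a 4 Kbit (512-byte) FRAM split into two 256-byte
# banks; the methods above select the bank via the LSB of the I2C device
# address (`self.address | 1` / `& 0xFE`). Requires real I2C hardware
# (e.g. a Navio board), so treat this as an untested sketch.
if __name__ == "__main__":
    fram = MB85RC04()            # bus 1, device address 0x50 by default
    fram.writeByte(10, 0xAB)     # lands in the lower bank (address < 256)
    fram.writeByte(300, 0xCD)    # lands in the upper bank (address >= 256)
    print("byte@10=0x%02X byte@300=0x%02X"
          % (fram.readByte(10), fram.readByte(300)))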
|
muawijhe/ccnavio
|
ext/navio/Python/FRAM/MB85RC04.py
|
Python
|
mit
| 3,115
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('converter.views',
url(r'^formats/$', 'formats_list', (), 'formats_list'),
)
|
appsembler/mayan_appsembler
|
apps/converter/urls.py
|
Python
|
gpl-3.0
| 157
|
"""Provides a class for managing BIG-IP Profile resources."""
# coding=utf-8
#
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5_cccl.resource import Resource
LOGGER = logging.getLogger(__name__)
class Profile(Resource):
"""Profile class for managing configuration on BIG-IP."""
properties = dict(name=None,
partition=None,
context="all")
def __init__(self, name, partition, **properties):
"""Create a Virtual server instance."""
super(Profile, self).__init__(name, partition)
self._data['context'] = properties.get('context', "all")
def __eq__(self, other):
if not isinstance(other, Profile):
return False
return super(Profile, self).__eq__(other)
def _uri_path(self, bigip):
""""""
raise NotImplementedError
def __repr__(self):
return 'Profile(%r, %r, context=%r)' % (self._data['name'],
self._data['partition'],
self._data['context'])
|
ryan-talley/f5-cccl
|
f5_cccl/resource/ltm/profile/profile.py
|
Python
|
apache-2.0
| 1,655
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'domoton.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
sonirico/domoton
|
domoton/urls.py
|
Python
|
gpl-2.0
| 276
|
'''
Created on Jul 22, 2011
@author: Rio
'''
import copy
from datetime import datetime
import itertools
from logging import getLogger
from math import floor
import os
import re
import random
import shutil
import struct
import time
import traceback
import weakref
import zlib
import sys
import blockrotation
from box import BoundingBox
from entity import Entity, TileEntity
from faces import FaceXDecreasing, FaceXIncreasing, FaceZDecreasing, FaceZIncreasing
from level import LightedChunk, EntityLevel, computeChunkHeightMap, MCLevel, ChunkBase
from materials import alphaMaterials
from mclevelbase import ChunkMalformed, ChunkNotPresent, exhaust, PlayerNotFound
import nbt
from numpy import array, clip, maximum, zeros
from regionfile import MCRegionFile
log = getLogger(__name__)
DIM_NETHER = -1
DIM_END = 1
__all__ = ["ZeroChunk", "AnvilChunk", "ChunkedLevelMixin", "MCInfdevOldLevel", "MCAlphaDimension", "ZipSchematic"]
_zeros = {}
class SessionLockLost(IOError):
pass
def ZeroChunk(height=512):
z = _zeros.get(height)
if z is None:
z = _zeros[height] = _ZeroChunk(height)
return z
class _ZeroChunk(ChunkBase):
" a placebo for neighboring-chunk routines "
def __init__(self, height=512):
zeroChunk = zeros((16, 16, height), 'uint8')
whiteLight = zeroChunk + 15
self.Blocks = zeroChunk
self.BlockLight = whiteLight
self.SkyLight = whiteLight
self.Data = zeroChunk
def unpackNibbleArray(dataArray):
s = dataArray.shape
unpackedData = zeros((s[0], s[1], s[2] * 2), dtype='uint8')
unpackedData[:, :, ::2] = dataArray
unpackedData[:, :, ::2] &= 0xf
unpackedData[:, :, 1::2] = dataArray
unpackedData[:, :, 1::2] >>= 4
return unpackedData
def packNibbleArray(unpackedData):
packedData = array(unpackedData.reshape(16, 16, unpackedData.shape[2] / 2, 2))
packedData[..., 1] <<= 4
packedData[..., 1] |= packedData[..., 0]
return array(packedData[:, :, :, 1])
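# Hedged demo (added for illustration; not part of the original module).
# Anvil stores Data/BlockLight/SkyLight as nibble arrays: two 4-bit values
# per byte, low nibble at even indices, high nibble at odd indices. This
# checks that the two helpers above are inverses for values in 0..15:
def _demoNibbleRoundtrip():
    data = zeros((16, 16, 16), 'uint8')
    data[:] = 11  # any value that fits in a nibble
    packed = packNibbleArray(data)
    assert packed.shape == (16, 16, 8)
    assert (unpackNibbleArray(packed) == data).all()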
def sanitizeBlocks(chunk):
# change grass to dirt where needed so Minecraft doesn't flip out and die
grass = chunk.Blocks == chunk.materials.Grass.ID
grass |= chunk.Blocks == chunk.materials.Dirt.ID
badgrass = grass[:, :, 1:] & grass[:, :, :-1]
chunk.Blocks[:, :, :-1][badgrass] = chunk.materials.Dirt.ID
# remove any thin snow layers immediately above other thin snow layers.
# minecraft doesn't flip out, but it's almost never intended
if hasattr(chunk.materials, "SnowLayer"):
snowlayer = chunk.Blocks == chunk.materials.SnowLayer.ID
badsnow = snowlayer[:, :, 1:] & snowlayer[:, :, :-1]
chunk.Blocks[:, :, 1:][badsnow] = chunk.materials.Air.ID
class AnvilChunkData(object):
""" This is the chunk data backing an AnvilChunk. Chunk data is retained by the MCInfdevOldLevel until its
AnvilChunk is no longer used, then it is either cached in memory, discarded, or written to disk according to
resource limits.
AnvilChunks are stored in a WeakValueDictionary so we can find out when they are no longer used by clients. The
AnvilChunkData for an unused chunk may safely be discarded or written out to disk. The client should probably
not keep references to a whole lot of chunks or else it will run out of memory.
"""
def __init__(self, world, chunkPosition, root_tag = None, create = False):
self.chunkPosition = chunkPosition
self.world = world
self.root_tag = root_tag
self.dirty = False
self.Blocks = zeros((16, 16, world.Height), 'uint8') # xxx uint16?
self.Data = zeros((16, 16, world.Height), 'uint8')
self.BlockLight = zeros((16, 16, world.Height), 'uint8')
self.SkyLight = zeros((16, 16, world.Height), 'uint8')
self.SkyLight[:] = 15
if create:
self._create()
else:
self._load(root_tag)
def _create(self):
(cx, cz) = self.chunkPosition
chunkTag = nbt.TAG_Compound()
chunkTag.name = ""
levelTag = nbt.TAG_Compound()
chunkTag["Level"] = levelTag
levelTag["HeightMap"] = nbt.TAG_Int_Array(zeros((16, 16), 'uint32').newbyteorder())
levelTag["TerrainPopulated"] = nbt.TAG_Byte(1)
levelTag["xPos"] = nbt.TAG_Int(cx)
levelTag["zPos"] = nbt.TAG_Int(cz)
levelTag["LastUpdate"] = nbt.TAG_Long(0)
levelTag["Entities"] = nbt.TAG_List()
levelTag["TileEntities"] = nbt.TAG_List()
self.root_tag = chunkTag
self.dirty = True
def _load(self, root_tag):
self.root_tag = root_tag
for sec in self.root_tag["Level"].pop("Sections", []):
y = sec["Y"].value * 16
for name in "Blocks", "Data", "SkyLight", "BlockLight":
arr = getattr(self, name)
secarray = sec[name].value
if name == "Blocks":
secarray.shape = (16, 16, 16)
else:
secarray.shape = (16, 16, 8)
secarray = unpackNibbleArray(secarray)
arr[..., y:y + 16] = secarray.swapaxes(0, 2)
def savedTagData(self):
""" does not recalculate any data or light """
log.debug(u"Saving chunk: {0}".format(self))
sanitizeBlocks(self)
sections = nbt.TAG_List()
for y in range(0, self.world.Height, 16):
section = nbt.TAG_Compound()
Blocks = self.Blocks[..., y:y + 16].swapaxes(0, 2)
Data = self.Data[..., y:y + 16].swapaxes(0, 2)
BlockLight = self.BlockLight[..., y:y + 16].swapaxes(0, 2)
SkyLight = self.SkyLight[..., y:y + 16].swapaxes(0, 2)
if (not Blocks.any() and
not BlockLight.any() and
(SkyLight == 15).all()):
continue
Data = packNibbleArray(Data)
BlockLight = packNibbleArray(BlockLight)
SkyLight = packNibbleArray(SkyLight)
section['Blocks'] = nbt.TAG_Byte_Array(array(Blocks))
section['Data'] = nbt.TAG_Byte_Array(array(Data))
section['BlockLight'] = nbt.TAG_Byte_Array(array(BlockLight))
section['SkyLight'] = nbt.TAG_Byte_Array(array(SkyLight))
section["Y"] = nbt.TAG_Byte(y / 16)
sections.append(section)
self.root_tag["Level"]["Sections"] = sections
data = self.root_tag.save(compressed=False)
del self.root_tag["Level"]["Sections"]
log.debug(u"Saved chunk {0}".format(self))
return data
@property
def materials(self):
return self.world.materials
class AnvilChunk(LightedChunk):
""" This is a 16x16xH chunk in an (infinite) world.
The properties Blocks, Data, SkyLight, BlockLight, and Heightmap
are ndarrays containing the respective blocks in the chunk file.
Each array is indexed [x,z,y]. The Data, Skylight, and BlockLight
arrays are automatically unpacked from nibble arrays into byte arrays
for better handling.
"""
def __init__(self, chunkData):
self.world = chunkData.world
self.chunkPosition = chunkData.chunkPosition
self.chunkData = chunkData
def savedTagData(self):
return self.chunkData.savedTagData()
def __str__(self):
return u"AnvilChunk, coords:{0}, world: {1}, D:{2}, L:{3}".format(self.chunkPosition, self.world.displayName, self.dirty, self.needsLighting)
@property
def needsLighting(self):
return self.chunkPosition in self.world.chunksNeedingLighting
@needsLighting.setter
def needsLighting(self, value):
if value:
self.world.chunksNeedingLighting.add(self.chunkPosition)
else:
self.world.chunksNeedingLighting.discard(self.chunkPosition)
def generateHeightMap(self):
if self.world.dimNo == DIM_NETHER:
self.HeightMap[:] = 0
else:
computeChunkHeightMap(self.materials, self.Blocks, self.HeightMap)
def addEntity(self, entityTag):
def doubleize(name):
# This is needed for compatibility with Indev levels. Those levels use TAG_Float for entity motion and pos
if name in entityTag:
m = entityTag[name]
entityTag[name] = nbt.TAG_List([nbt.TAG_Double(i.value) for i in m])
doubleize("Motion")
doubleize("Position")
self.dirty = True
return super(AnvilChunk, self).addEntity(entityTag)
def removeEntitiesInBox(self, box):
self.dirty = True
return super(AnvilChunk, self).removeEntitiesInBox(box)
def removeTileEntitiesInBox(self, box):
self.dirty = True
return super(AnvilChunk, self).removeTileEntitiesInBox(box)
# --- AnvilChunkData accessors ---
@property
def root_tag(self):
return self.chunkData.root_tag
@property
def dirty(self):
return self.chunkData.dirty
@dirty.setter
def dirty(self, val):
self.chunkData.dirty = val
# --- Chunk attributes ---
@property
def materials(self):
return self.world.materials
@property
def Blocks(self):
return self.chunkData.Blocks
@property
def Data(self):
return self.chunkData.Data
@property
def SkyLight(self):
return self.chunkData.SkyLight
@property
def BlockLight(self):
return self.chunkData.BlockLight
@property
def Biomes(self):
return self.root_tag["Level"]["Biomes"].value.reshape((16, 16))
@property
def HeightMap(self):
return self.root_tag["Level"]["HeightMap"].value.reshape((16, 16))
@property
def Entities(self):
return self.root_tag["Level"]["Entities"]
@property
def TileEntities(self):
return self.root_tag["Level"]["TileEntities"]
@property
def TerrainPopulated(self):
return self.root_tag["Level"]["TerrainPopulated"].value
@TerrainPopulated.setter
def TerrainPopulated(self, val):
"""True or False. If False, the game will populate the chunk with
ores and vegetation on next load"""
self.root_tag["Level"]["TerrainPopulated"].value = val
self.dirty = True
base36alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
def decbase36(s):
return int(s, 36)
def base36(n):
global base36alphabet
n = int(n)
if 0 == n:
return '0'
neg = ""
if n < 0:
neg = "-"
n = -n
work = []
while n:
n, digit = divmod(n, 36)
work.append(base36alphabet[digit])
return neg + ''.join(reversed(work))
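# Hedged demo (added for illustration; not part of the original module):
# base36/decbase36 implement the base-36 encoding used by the old per-chunk
# filenames (see _oldChunkFilename below), e.g. base36(-35) == "-z".
def _demoBase36():
    assert base36(0) == '0'
    assert base36(-35) == '-z'
    assert decbase36(base36(12345)) == 12345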
def deflate(data):
# zobj = zlib.compressobj(6,zlib.DEFLATED,-zlib.MAX_WBITS,zlib.DEF_MEM_LEVEL,0)
# zdata = zobj.compress(data)
# zdata += zobj.flush()
# return zdata
return zlib.compress(data)
def inflate(data):
return zlib.decompress(data)
class ChunkedLevelMixin(MCLevel):
def blockLightAt(self, x, y, z):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
return ch.BlockLight[xInChunk, zInChunk, y]
def setBlockLightAt(self, x, y, z, newLight):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
ch.BlockLight[xInChunk, zInChunk, y] = newLight
ch.chunkChanged(False)
def blockDataAt(self, x, y, z):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
return ch.Data[xInChunk, zInChunk, y]
def setBlockDataAt(self, x, y, z, newdata):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
ch.Data[xInChunk, zInChunk, y] = newdata
ch.dirty = True
ch.needsLighting = True
def blockAt(self, x, y, z):
"""returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
return ch.Blocks[xInChunk, zInChunk, y]
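    # Worked example (added comment): for x == 35, z == -3 the lines above
    # give xc == 35 >> 4 == 2, xInChunk == 35 & 0xf == 3, and zc == -3 >> 4
    # == -1, zInChunk == -3 & 0xf == 13, i.e. arithmetic shift and masking
    # map world coordinates to the owning 16x16 chunk and the offset within
    # it, including for negative coordinates.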
def setBlockAt(self, x, y, z, blockID):
"""returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
ch.Blocks[xInChunk, zInChunk, y] = blockID
ch.dirty = True
ch.needsLighting = True
def skylightAt(self, x, y, z):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
return ch.SkyLight[xInChunk, zInChunk, y]
def setSkylightAt(self, x, y, z, lightValue):
if y < 0 or y >= self.Height:
return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
skyLight = ch.SkyLight
oldValue = skyLight[xInChunk, zInChunk, y]
ch.chunkChanged(False)
if oldValue < lightValue:
skyLight[xInChunk, zInChunk, y] = lightValue
return oldValue < lightValue
createChunk = NotImplemented
def generateLights(self, dirtyChunkPositions=None):
return exhaust(self.generateLightsIter(dirtyChunkPositions))
def generateLightsIter(self, dirtyChunkPositions=None):
""" dirtyChunks may be an iterable yielding (xPos,zPos) tuples
if none, generate lights for all chunks that need lighting
"""
startTime = datetime.now()
if dirtyChunkPositions is None:
dirtyChunkPositions = self.chunksNeedingLighting
else:
dirtyChunkPositions = (c for c in dirtyChunkPositions if self.containsChunk(*c))
dirtyChunkPositions = sorted(dirtyChunkPositions)
maxLightingChunks = getattr(self, 'loadedChunkLimit', 400)
log.info(u"Asked to light {0} chunks".format(len(dirtyChunkPositions)))
chunkLists = [dirtyChunkPositions]
def reverseChunkPosition((cx, cz)):
return cz, cx
def splitChunkLists(chunkLists):
newChunkLists = []
for l in chunkLists:
# list is already sorted on x position, so this splits into left and right
smallX = l[:len(l) / 2]
bigX = l[len(l) / 2:]
# sort halves on z position
smallX = sorted(smallX, key=reverseChunkPosition)
bigX = sorted(bigX, key=reverseChunkPosition)
# add quarters to list
newChunkLists.append(smallX[:len(smallX) / 2])
newChunkLists.append(smallX[len(smallX) / 2:])
newChunkLists.append(bigX[:len(bigX) / 2])
newChunkLists.append(bigX[len(bigX) / 2:])
return newChunkLists
while len(chunkLists[0]) > maxLightingChunks:
chunkLists = splitChunkLists(chunkLists)
if len(chunkLists) > 1:
log.info(u"Using {0} batches to conserve memory.".format(len(chunkLists)))
# batchSize = min(len(a) for a in chunkLists)
estimatedTotals = [len(a) * 32 for a in chunkLists]
workDone = 0
for i, dc in enumerate(chunkLists):
log.info(u"Batch {0}/{1}".format(i, len(chunkLists)))
dc = sorted(dc)
workTotal = sum(estimatedTotals)
t = 0
for c, t, p in self._generateLightsIter(dc):
yield c + workDone, t + workTotal - estimatedTotals[i], p
estimatedTotals[i] = t
workDone += t
timeDelta = datetime.now() - startTime
if len(dirtyChunkPositions):
log.info(u"Completed in {0}, {1} per chunk".format(timeDelta, dirtyChunkPositions and timeDelta / len(dirtyChunkPositions) or 0))
return
def _generateLightsIter(self, dirtyChunkPositions):
la = array(self.materials.lightAbsorption)
clip(la, 1, 15, la)
dirtyChunks = set(self.getChunk(*cPos) for cPos in dirtyChunkPositions)
workDone = 0
workTotal = len(dirtyChunks) * 29
progressInfo = (u"Lighting {0} chunks".format(len(dirtyChunks)))
log.info(progressInfo)
for i, chunk in enumerate(dirtyChunks):
chunk.chunkChanged()
yield i, workTotal, progressInfo
assert chunk.dirty and chunk.needsLighting
workDone += len(dirtyChunks)
workTotal = len(dirtyChunks)
for ch in list(dirtyChunks):
# relight all blocks in neighboring chunks in case their light source disappeared.
cx, cz = ch.chunkPosition
for dx, dz in itertools.product((-1, 0, 1), (-1, 0, 1)):
try:
ch = self.getChunk(cx + dx, cz + dz)
except (ChunkNotPresent, ChunkMalformed):
continue
dirtyChunks.add(ch)
ch.dirty = True
dirtyChunks = sorted(dirtyChunks, key=lambda x: x.chunkPosition)
workTotal += len(dirtyChunks) * 28
for i, chunk in enumerate(dirtyChunks):
chunk.BlockLight[:] = self.materials.lightEmission[chunk.Blocks]
chunk.dirty = True
zeroChunk = ZeroChunk(self.Height)
zeroChunk.BlockLight[:] = 0
zeroChunk.SkyLight[:] = 0
startingDirtyChunks = dirtyChunks
oldLeftEdge = zeros((1, 16, self.Height), 'uint8')
oldBottomEdge = zeros((16, 1, self.Height), 'uint8')
oldChunk = zeros((16, 16, self.Height), 'uint8')
if self.dimNo in (-1, 1):
lights = ("BlockLight",)
else:
lights = ("BlockLight", "SkyLight")
log.info(u"Dispersing light...")
def clipLight(light):
# light arrays are all uint8 by default, so when results go negative
# they become large instead. reinterpret as signed int using view()
# and then clip to range
light.view('int8').clip(0, 15, light)
for j, light in enumerate(lights):
zerochunkLight = getattr(zeroChunk, light)
newDirtyChunks = list(startingDirtyChunks)
work = 0
for i in range(14):
if len(newDirtyChunks) == 0:
workTotal -= len(startingDirtyChunks) * (14 - i)
break
progressInfo = u"{0} Pass {1}: {2} chunks".format(light, i, len(newDirtyChunks))
log.info(progressInfo)
# propagate light!
# for each of the six cardinal directions, figure a new light value for
# adjoining blocks by reducing this chunk's light by light absorption and fall off.
# compare this new light value against the old light value and update with the maximum.
#
# we calculate all chunks one step before moving to the next step, to ensure all gaps at chunk edges are filled.
# we do an extra cycle because lights sent across edges may lag by one cycle.
#
# xxx this can be optimized by finding the highest and lowest blocks
# that changed after one pass, and only calculating changes for that
# vertical slice on the next pass. newDirtyChunks would have to be a
# list of (cPos, miny, maxy) tuples or a cPos : (miny, maxy) dict
newDirtyChunks = set(newDirtyChunks)
newDirtyChunks.discard(zeroChunk)
dirtyChunks = sorted(newDirtyChunks, key=lambda x: x.chunkPosition)
newDirtyChunks = list()
for chunk in dirtyChunks:
(cx, cz) = chunk.chunkPosition
neighboringChunks = {}
for dir, dx, dz in ((FaceXDecreasing, -1, 0),
(FaceXIncreasing, 1, 0),
(FaceZDecreasing, 0, -1),
(FaceZIncreasing, 0, 1)):
try:
neighboringChunks[dir] = self.getChunk(cx + dx, cz + dz)
except (ChunkNotPresent, ChunkMalformed):
neighboringChunks[dir] = zeroChunk
neighboringChunks[dir].dirty = True
chunkLa = la[chunk.Blocks]
chunkLight = getattr(chunk, light)
oldChunk[:] = chunkLight[:]
### Spread light toward -X
nc = neighboringChunks[FaceXDecreasing]
ncLight = getattr(nc, light)
oldLeftEdge[:] = ncLight[15:16, :, 0:self.Height] # save the old left edge
# left edge
newlight = (chunkLight[0:1, :, :self.Height] - la[nc.Blocks[15:16, :, 0:self.Height]])
clipLight(newlight)
maximum(ncLight[15:16, :, 0:self.Height], newlight, ncLight[15:16, :, 0:self.Height])
# chunk body
newlight = (chunkLight[1:16, :, 0:self.Height] - chunkLa[0:15, :, 0:self.Height])
clipLight(newlight)
maximum(chunkLight[0:15, :, 0:self.Height], newlight, chunkLight[0:15, :, 0:self.Height])
# right edge
nc = neighboringChunks[FaceXIncreasing]
ncLight = getattr(nc, light)
newlight = ncLight[0:1, :, :self.Height] - chunkLa[15:16, :, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[15:16, :, 0:self.Height], newlight, chunkLight[15:16, :, 0:self.Height])
### Spread light toward +X
# right edge
nc = neighboringChunks[FaceXIncreasing]
ncLight = getattr(nc, light)
newlight = (chunkLight[15:16, :, 0:self.Height] - la[nc.Blocks[0:1, :, 0:self.Height]])
clipLight(newlight)
maximum(ncLight[0:1, :, 0:self.Height], newlight, ncLight[0:1, :, 0:self.Height])
# chunk body
newlight = (chunkLight[0:15, :, 0:self.Height] - chunkLa[1:16, :, 0:self.Height])
clipLight(newlight)
maximum(chunkLight[1:16, :, 0:self.Height], newlight, chunkLight[1:16, :, 0:self.Height])
# left edge
nc = neighboringChunks[FaceXDecreasing]
ncLight = getattr(nc, light)
newlight = ncLight[15:16, :, :self.Height] - chunkLa[0:1, :, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[0:1, :, 0:self.Height], newlight, chunkLight[0:1, :, 0:self.Height])
zerochunkLight[:] = 0 # zero the zero chunk after each direction
# so the lights it absorbed don't affect the next pass
# check if the left edge changed and dirty or compress the chunk appropriately
if (oldLeftEdge != ncLight[15:16, :, :self.Height]).any():
# chunk is dirty
newDirtyChunks.append(nc)
### Spread light toward -Z
# bottom edge
nc = neighboringChunks[FaceZDecreasing]
ncLight = getattr(nc, light)
oldBottomEdge[:] = ncLight[:, 15:16, :self.Height] # save the old bottom edge
newlight = (chunkLight[:, 0:1, :self.Height] - la[nc.Blocks[:, 15:16, :self.Height]])
clipLight(newlight)
maximum(ncLight[:, 15:16, :self.Height], newlight, ncLight[:, 15:16, :self.Height])
# chunk body
newlight = (chunkLight[:, 1:16, :self.Height] - chunkLa[:, 0:15, :self.Height])
clipLight(newlight)
maximum(chunkLight[:, 0:15, :self.Height], newlight, chunkLight[:, 0:15, :self.Height])
# top edge
nc = neighboringChunks[FaceZIncreasing]
ncLight = getattr(nc, light)
newlight = ncLight[:, 0:1, :self.Height] - chunkLa[:, 15:16, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[:, 15:16, 0:self.Height], newlight, chunkLight[:, 15:16, 0:self.Height])
### Spread light toward +Z
# top edge
nc = neighboringChunks[FaceZIncreasing]
ncLight = getattr(nc, light)
newlight = (chunkLight[:, 15:16, :self.Height] - la[nc.Blocks[:, 0:1, :self.Height]])
clipLight(newlight)
maximum(ncLight[:, 0:1, :self.Height], newlight, ncLight[:, 0:1, :self.Height])
# chunk body
newlight = (chunkLight[:, 0:15, :self.Height] - chunkLa[:, 1:16, :self.Height])
clipLight(newlight)
maximum(chunkLight[:, 1:16, :self.Height], newlight, chunkLight[:, 1:16, :self.Height])
# bottom edge
nc = neighboringChunks[FaceZDecreasing]
ncLight = getattr(nc, light)
newlight = ncLight[:, 15:16, :self.Height] - chunkLa[:, 0:1, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[:, 0:1, 0:self.Height], newlight, chunkLight[:, 0:1, 0:self.Height])
zerochunkLight[:] = 0
if (oldBottomEdge != ncLight[:, 15:16, :self.Height]).any():
newDirtyChunks.append(nc)
newlight = (chunkLight[:, :, 0:self.Height - 1] - chunkLa[:, :, 1:self.Height])
clipLight(newlight)
maximum(chunkLight[:, :, 1:self.Height], newlight, chunkLight[:, :, 1:self.Height])
newlight = (chunkLight[:, :, 1:self.Height] - chunkLa[:, :, 0:self.Height - 1])
clipLight(newlight)
maximum(chunkLight[:, :, 0:self.Height - 1], newlight, chunkLight[:, :, 0:self.Height - 1])
if (oldChunk != chunkLight).any():
newDirtyChunks.append(chunk)
work += 1
yield workDone + work, workTotal, progressInfo
workDone += work
workTotal -= len(startingDirtyChunks)
workTotal += work
work = 0
for ch in startingDirtyChunks:
ch.needsLighting = False
def TagProperty(tagName, tagType, default_or_func=None):
def getter(self):
if tagName not in self.root_tag["Data"]:
if hasattr(default_or_func, "__call__"):
default = default_or_func(self)
else:
default = default_or_func
self.root_tag["Data"][tagName] = tagType(default)
return self.root_tag["Data"][tagName].value
def setter(self, val):
self.root_tag["Data"][tagName] = tagType(value=val)
return property(getter, setter)
class AnvilWorldFolder(object):
def __init__(self, filename):
if not os.path.exists(filename):
os.mkdir(filename)
elif not os.path.isdir(filename):
raise IOError, "AnvilWorldFolder: Not a folder: %s" % filename
self.filename = filename
self.regionFiles = {}
# --- File paths ---
def getFilePath(self, path):
path = path.replace("/", os.path.sep)
return os.path.join(self.filename, path)
def getFolderPath(self, path):
path = self.getFilePath(path)
if not os.path.exists(path):
os.makedirs(path)
return path
# --- Region files ---
def getRegionFilename(self, rx, rz):
return os.path.join(self.getFolderPath("region"), "r.%s.%s.%s" % (rx, rz, "mca"))
def getRegionFile(self, rx, rz):
regionFile = self.regionFiles.get((rx, rz))
if regionFile:
return regionFile
regionFile = MCRegionFile(self.getRegionFilename(rx, rz), (rx, rz))
self.regionFiles[rx, rz] = regionFile
return regionFile
def getRegionForChunk(self, cx, cz):
rx = cx >> 5
rz = cz >> 5
return self.getRegionFile(rx, rz)
def closeRegions(self):
for rf in self.regionFiles.values():
rf.close()
self.regionFiles = {}
# --- Chunks and chunk listing ---
def tryLoadRegionFile(self, filepath):
filename = os.path.basename(filepath)
bits = filename.split('.')
if len(bits) < 4 or bits[0] != 'r' or bits[3] != "mca":
return None
try:
rx, rz = map(int, bits[1:3])
except ValueError:
return None
return MCRegionFile(filepath, (rx, rz))
def findRegionFiles(self):
regionDir = self.getFolderPath("region")
regionFiles = os.listdir(regionDir)
for filename in regionFiles:
yield os.path.join(regionDir, filename)
def listChunks(self):
chunks = set()
for filepath in self.findRegionFiles():
regionFile = self.tryLoadRegionFile(filepath)
if regionFile is None:
continue
if regionFile.offsets.any():
rx, rz = regionFile.regionCoords
self.regionFiles[rx, rz] = regionFile
for index, offset in enumerate(regionFile.offsets):
if offset:
cx = index & 0x1f
cz = index >> 5
cx += rx << 5
cz += rz << 5
chunks.add((cx, cz))
else:
log.info(u"Removing empty region file {0}".format(filepath))
regionFile.close()
os.unlink(regionFile.path)
return chunks
def containsChunk(self, cx, cz):
rx = cx >> 5
rz = cz >> 5
if not os.path.exists(self.getRegionFilename(rx, rz)):
return False
return self.getRegionForChunk(cx, cz).containsChunk(cx, cz)
def deleteChunk(self, cx, cz):
r = cx >> 5, cz >> 5
rf = self.getRegionFile(*r)
if rf:
rf.setOffset(cx & 0x1f, cz & 0x1f, 0)
if (rf.offsets == 0).all():
rf.close()
os.unlink(rf.path)
del self.regionFiles[r]
def readChunk(self, cx, cz):
if not self.containsChunk(cx, cz):
raise ChunkNotPresent((cx, cz))
return self.getRegionForChunk(cx, cz).readChunk(cx, cz)
def saveChunk(self, cx, cz, data):
regionFile = self.getRegionForChunk(cx, cz)
regionFile.saveChunk(cx, cz, data)
def copyChunkFrom(self, worldFolder, cx, cz):
fromRF = worldFolder.getRegionForChunk(cx, cz)
rf = self.getRegionForChunk(cx, cz)
rf.copyChunkFrom(fromRF, cx, cz)
class MCInfdevOldLevel(ChunkedLevelMixin, EntityLevel):
def __init__(self, filename=None, create=False, random_seed=None, last_played=None, readonly=False):
"""
Load an Alpha level from the given filename. It can point to either
a level.dat or a folder containing one. If create is True, it will
also create the world using the random_seed and last_played arguments.
If they are none, a random 64-bit seed will be selected for RandomSeed
and long(time.time() * 1000) will be used for LastPlayed.
If you try to create an existing world, its level.dat will be replaced.
"""
self.Length = 0
self.Width = 0
self.Height = 256
self.playerTagCache = {}
self.players = []
assert not (create and readonly)
if os.path.basename(filename) in ("level.dat", "level.dat_old"):
filename = os.path.dirname(filename)
if not os.path.exists(filename):
if not create:
raise IOError('File not found')
os.mkdir(filename)
if not os.path.isdir(filename):
raise IOError('File is not a Minecraft Alpha world')
self.worldFolder = AnvilWorldFolder(filename)
self.filename = self.worldFolder.getFilePath("level.dat")
self.readonly = readonly
if not readonly:
self.acquireSessionLock()
workFolderPath = self.worldFolder.getFolderPath("##MCEDIT.TEMP##")
if os.path.exists(workFolderPath):
# xxxxxxx Opening a world a second time deletes the first world's work folder and crashes when the first
# world tries to read a modified chunk from the work folder. This mainly happens when importing a world
# into itself after modifying it.
shutil.rmtree(workFolderPath, True)
self.unsavedWorkFolder = AnvilWorldFolder(workFolderPath)
# maps (cx, cz) pairs to AnvilChunk
self._loadedChunks = weakref.WeakValueDictionary()
# maps (cx, cz) pairs to AnvilChunkData
self._loadedChunkData = {}
self.chunksNeedingLighting = set()
self._allChunks = None
self.dimensions = {}
self.loadLevelDat(create, random_seed, last_played)
assert self.version == self.VERSION_ANVIL, "Pre-Anvil world formats are not supported (for now)"
self.playersFolder = self.worldFolder.getFolderPath("players")
self.players = [x[:-4] for x in os.listdir(self.playersFolder) if x.endswith(".dat")]
if "Player" in self.root_tag["Data"]:
self.players.append("Player")
self.preloadDimensions()
# --- Load, save, create ---
def _create(self, filename, random_seed, last_played):
# create a new level
root_tag = nbt.TAG_Compound()
root_tag["Data"] = nbt.TAG_Compound()
root_tag["Data"]["SpawnX"] = nbt.TAG_Int(0)
root_tag["Data"]["SpawnY"] = nbt.TAG_Int(2)
root_tag["Data"]["SpawnZ"] = nbt.TAG_Int(0)
if last_played is None:
last_played = long(time.time() * 1000)
if random_seed is None:
random_seed = long(random.random() * 0xffffffffffffffffL) - 0x8000000000000000L
self.root_tag = root_tag
root_tag["Data"]['version'] = nbt.TAG_Int(self.VERSION_ANVIL)
self.LastPlayed = long(last_played)
self.RandomSeed = long(random_seed)
self.SizeOnDisk = 0
self.Time = 1
self.LevelName = os.path.basename(self.worldFolder.filename)
### if singleplayer:
self.createPlayer("Player")
def acquireSessionLock(self):
lockfile = self.worldFolder.getFilePath("session.lock")
self.initTime = int(time.time() * 1000)
with file(lockfile, "wb") as f:
f.write(struct.pack(">q", self.initTime))
def checkSessionLock(self):
if self.readonly:
raise SessionLockLost, "World is opened read only."
lockfile = self.worldFolder.getFilePath("session.lock")
try:
(lock, ) = struct.unpack(">q", file(lockfile, "rb").read())
except struct.error:
lock = -1
if lock != self.initTime:
raise SessionLockLost, "Session lock lost. This world is being accessed from another location."
def loadLevelDat(self, create=False, random_seed=None, last_played=None):
if create:
self._create(self.filename, random_seed, last_played)
self.saveInPlace()
else:
try:
self.root_tag = nbt.load(self.filename)
except Exception, e:
filename_old = self.worldFolder.getFilePath("level.dat_old")
log.info("Error loading level.dat, trying level.dat_old ({0})".format(e))
try:
self.root_tag = nbt.load(filename_old)
log.info("level.dat restored from backup.")
self.saveInPlace()
except Exception, e:
traceback.print_exc()
print repr(e)
log.info("Error loading level.dat_old. Initializing with defaults.")
self._create(self.filename, random_seed, last_played)
def saveInPlace(self):
if self.readonly:
raise IOError, "World is opened read only."
self.checkSessionLock()
for level in self.dimensions.itervalues():
level.saveInPlace(True)
dirtyChunkCount = 0
for chunk in self._loadedChunkData.itervalues():
cx, cz = chunk.chunkPosition
if chunk.dirty:
data = chunk.savedTagData()
dirtyChunkCount += 1
self.worldFolder.saveChunk(cx, cz, data)
chunk.dirty = False
for cx, cz in self.unsavedWorkFolder.listChunks():
if (cx, cz) not in self._loadedChunkData:
data = self.unsavedWorkFolder.readChunk(cx, cz)
self.worldFolder.saveChunk(cx, cz, data)
dirtyChunkCount += 1
self.unsavedWorkFolder.closeRegions()
shutil.rmtree(self.unsavedWorkFolder.filename, True)
os.mkdir(self.unsavedWorkFolder.filename)
for path, tag in self.playerTagCache.iteritems():
tag.save(path)
self.playerTagCache.clear()
self.root_tag.save(self.filename)
log.info(u"Saved {0} chunks (dim {1})".format(dirtyChunkCount, self.dimNo))
def unload(self):
"""
Unload all chunks and close all open filehandles.
"""
self.worldFolder.closeRegions()
if not self.readonly:
self.unsavedWorkFolder.closeRegions()
self._allChunks = None
self._loadedChunks.clear()
self._loadedChunkData.clear()
def close(self):
"""
Unload all chunks and close all open filehandles. Discard any unsaved data.
"""
self.unload()
try:
self.checkSessionLock()
shutil.rmtree(self.unsavedWorkFolder.filename, True)
except SessionLockLost:
pass
# --- Resource limits ---
loadedChunkLimit = 400
# --- Constants ---
GAMETYPE_SURVIVAL = 0
GAMETYPE_CREATIVE = 1
VERSION_MCR = 19132
VERSION_ANVIL = 19133
# --- Instance variables ---
materials = alphaMaterials
isInfinite = True
parentWorld = None
dimNo = 0
Height = 256
_bounds = None
# --- NBT Tag variables ---
SizeOnDisk = TagProperty('SizeOnDisk', nbt.TAG_Long, 0)
RandomSeed = TagProperty('RandomSeed', nbt.TAG_Long, 0)
Time = TagProperty('Time', nbt.TAG_Long, 0) # Age of the world in ticks. 20 ticks per second; 24000 ticks per day.
LastPlayed = TagProperty('LastPlayed', nbt.TAG_Long, lambda self: long(time.time() * 1000))
LevelName = TagProperty('LevelName', nbt.TAG_String, lambda self: self.displayName)
MapFeatures = TagProperty('MapFeatures', nbt.TAG_Byte, 1)
GameType = TagProperty('GameType', nbt.TAG_Int, 0) # 0 for survival, 1 for creative
version = TagProperty('version', nbt.TAG_Int, VERSION_ANVIL)
# --- World info ---
def __str__(self):
return "MCInfdevOldLevel(\"%s\")" % os.path.basename(self.worldFolder.filename)
@property
def displayName(self):
# shortname = os.path.basename(self.filename)
# if shortname == "level.dat":
shortname = os.path.basename(os.path.dirname(self.filename))
return shortname
@property
def bounds(self):
if self._bounds is None:
self._bounds = self.getWorldBounds()
return self._bounds
def getWorldBounds(self):
if self.chunkCount == 0:
return BoundingBox((0, 0, 0), (0, 0, 0))
allChunks = array(list(self.allChunks))
mincx = (allChunks[:, 0]).min()
maxcx = (allChunks[:, 0]).max()
mincz = (allChunks[:, 1]).min()
maxcz = (allChunks[:, 1]).max()
origin = (mincx << 4, 0, mincz << 4)
size = ((maxcx - mincx + 1) << 4, self.Height, (maxcz - mincz + 1) << 4)
return BoundingBox(origin, size)
@property
def size(self):
return self.bounds.size
# --- Format detection ---
@classmethod
def _isLevel(cls, filename):
if os.path.exists(os.path.join(filename, "chunks.dat")):
return False # exclude Pocket Edition folders
if not os.path.isdir(filename):
f = os.path.basename(filename)
if f not in ("level.dat", "level.dat_old"):
return False
filename = os.path.dirname(filename)
files = os.listdir(filename)
if "level.dat" in files or "level.dat_old" in files:
return True
return False
# --- Dimensions ---
def preloadDimensions(self):
worldDirs = os.listdir(self.worldFolder.filename)
for dirname in worldDirs:
if dirname.startswith("DIM"):
try:
dimNo = int(dirname[3:])
log.info("Found dimension {0}".format(dirname))
dim = MCAlphaDimension(self, dimNo)
self.dimensions[dimNo] = dim
except Exception, e:
log.error(u"Error loading dimension {0}: {1}".format(dirname, e))
def getDimension(self, dimNo):
if self.dimNo != 0:
return self.parentWorld.getDimension(dimNo)
if dimNo in self.dimensions:
return self.dimensions[dimNo]
dim = MCAlphaDimension(self, dimNo, create=True)
self.dimensions[dimNo] = dim
return dim
# --- Region I/O ---
def preloadChunkPositions(self):
log.info(u"Scanning for regions...")
self._allChunks = self.worldFolder.listChunks()
if not self.readonly:
self._allChunks.update(self.unsavedWorkFolder.listChunks())
self._allChunks.update(self._loadedChunkData.iterkeys())
def getRegionForChunk(self, cx, cz):
return self.worldFolder.getRegionFile(cx, cz)
# --- Chunk I/O ---
def dirhash(self, n):
return self.dirhashes[n % 64]
def _dirhash(self):
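        # (added comment) Called below with plain ints while the class body
        # is still executing (`dirhashes = [_dirhash(n) for n in range(64)]`),
        # so `self` is really the integer index 0..63 here, not an instance.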
n = self
n = n % 64
s = u""
if n >= 36:
s += u"1"
n -= 36
s += u"0123456789abcdefghijklmnopqrstuvwxyz"[n]
return s
dirhashes = [_dirhash(n) for n in range(64)]
def _oldChunkFilename(self, cx, cz):
return self.worldFolder.getFilePath("%s/%s/c.%s.%s.dat" % (self.dirhash(cx), self.dirhash(cz), base36(cx), base36(cz)))
def extractChunksInBox(self, box, parentFolder):
for cx, cz in box.chunkPositions:
if self.containsChunk(cx, cz):
self.extractChunk(cx, cz, parentFolder)
def extractChunk(self, cx, cz, parentFolder):
if not os.path.exists(parentFolder):
os.mkdir(parentFolder)
chunkFilename = self._oldChunkFilename(cx, cz)
outputFile = os.path.join(parentFolder, os.path.basename(chunkFilename))
chunk = self.getChunk(cx, cz)
chunk.root_tag.save(outputFile)
@property
def chunkCount(self):
"""Returns the number of chunks in the level. May initiate a costly
chunk scan."""
if self._allChunks is None:
self.preloadChunkPositions()
return len(self._allChunks)
@property
def allChunks(self):
"""Iterates over (xPos, zPos) tuples, one for each chunk in the level.
May initiate a costly chunk scan."""
if self._allChunks is None:
self.preloadChunkPositions()
return self._allChunks.__iter__()
def copyChunkFrom(self, world, cx, cz):
"""
Copy a chunk from world into the same chunk position in self.
"""
assert isinstance(world, MCInfdevOldLevel)
if self.readonly:
raise IOError, "World is opened read only."
self.checkSessionLock()
destChunk = self._loadedChunks.get((cx, cz))
sourceChunk = world._loadedChunks.get((cx, cz))
if sourceChunk:
if destChunk:
log.debug("Both chunks loaded. Using block copy.")
# Both chunks loaded. Use block copy.
self.copyBlocksFrom(world, destChunk.bounds, destChunk.bounds.origin)
return
else:
log.debug("Source chunk loaded. Saving into work folder.")
# Only source chunk loaded. Discard destination chunk and save source chunk in its place.
self._loadedChunkData.pop((cx, cz), None)
self.unsavedWorkFolder.saveChunk(cx, cz, sourceChunk.savedTagData())
return
else:
if destChunk:
log.debug("Destination chunk loaded. Using block copy.")
# Only destination chunk loaded. Use block copy.
self.copyBlocksFrom(world, destChunk.bounds, destChunk.bounds.origin)
else:
log.debug("No chunk loaded. Using world folder.copyChunkFrom")
# Neither chunk loaded. Copy via world folders.
self._loadedChunkData.pop((cx, cz), None)
# If the source chunk is dirty, write it to the work folder.
chunkData = world._loadedChunkData.pop((cx, cz), None)
if chunkData and chunkData.dirty:
data = chunkData.savedTagData()
world.unsavedWorkFolder.saveChunk(cx, cz, data)
if world.unsavedWorkFolder.containsChunk(cx, cz):
sourceFolder = world.unsavedWorkFolder
else:
sourceFolder = world.worldFolder
self.unsavedWorkFolder.copyChunkFrom(sourceFolder, cx, cz)
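# Summary of the four copyChunkFrom cases above:
#   source loaded, dest loaded     -> in-memory block copy
#   source loaded, dest unloaded   -> drop dest's cached data, write the
#                                     source's tag data into our work folder
#   source unloaded, dest loaded   -> in-memory block copy
#   neither loaded                 -> flush a dirty source chunk to its work
#                                     folder, then copy folder-to-folder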
def _getChunkBytes(self, cx, cz):
if not self.readonly and self.unsavedWorkFolder.containsChunk(cx, cz):
return self.unsavedWorkFolder.readChunk(cx, cz)
else:
return self.worldFolder.readChunk(cx, cz)
def _getChunkData(self, cx, cz):
chunkData = self._loadedChunkData.get((cx, cz))
if chunkData is not None: return chunkData
try:
data = self._getChunkBytes(cx, cz)
root_tag = nbt.load(buf=data)
chunkData = AnvilChunkData(self, (cx, cz), root_tag)
except (MemoryError, ChunkNotPresent):
raise
except Exception, e:
raise ChunkMalformed, "Chunk {0} had an error: {1!r}".format((cx, cz), e), sys.exc_info()[2]
if not self.readonly and self.unsavedWorkFolder.containsChunk(cx, cz):
chunkData.dirty = True
self._storeLoadedChunkData(chunkData)
return chunkData
def _storeLoadedChunkData(self, chunkData):
if len(self._loadedChunkData) > self.loadedChunkLimit:
# Try to find a chunk to unload. The chunk must not be in _loadedChunks, which contains only chunks that
# are in use by another object. If the chunk is dirty, save it to the temporary folder.
if not self.readonly:
self.checkSessionLock()
for (ocx, ocz), oldChunkData in self._loadedChunkData.items():
if (ocx, ocz) not in self._loadedChunks:
if oldChunkData.dirty and not self.readonly:
data = oldChunkData.savedTagData()
self.unsavedWorkFolder.saveChunk(ocx, ocz, data)
del self._loadedChunkData[ocx, ocz]
break
self._loadedChunkData[chunkData.chunkPosition] = chunkData
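# Eviction sketch (illustrative): once loadedChunkLimit is exceeded, the first
# cached chunk not currently referenced by an AnvilChunk in _loadedChunks is
# written out (if dirty and the world is writable) and dropped, so at most
# one entry is evicted per store.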
def getChunk(self, cx, cz):
""" read the chunk from disk, load it, and return it."""
chunk = self._loadedChunks.get((cx, cz))
if chunk is not None:
return chunk
chunkData = self._getChunkData(cx, cz)
chunk = AnvilChunk(chunkData)
self._loadedChunks[cx, cz] = chunk
return chunk
def markDirtyChunk(self, cx, cz):
self.getChunk(cx, cz).chunkChanged()
def markDirtyBox(self, box):
for cx, cz in box.chunkPositions:
self.markDirtyChunk(cx, cz)
def listDirtyChunks(self):
for cPos, chunkData in self._loadedChunkData.iteritems():
if chunkData.dirty:
yield cPos
# --- HeightMaps ---
def heightMapAt(self, x, z):
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
heightMap = ch.HeightMap
return heightMap[zInChunk, xInChunk] # HeightMap indices are backwards
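# Worked example (illustrative): for block (x, z) = (35, -10),
#   xc = 35 >> 4 = 2,  zc = -10 >> 4 = -1   (arithmetic shift floors)
#   xInChunk = 35 & 0xf = 3,  zInChunk = -10 & 0xf = 6
# and the lookup is heightMap[6, 3] because of the reversed index order.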
# --- Entities and TileEntities ---
def addEntity(self, entityTag):
assert isinstance(entityTag, nbt.TAG_Compound)
x, y, z = map(lambda x: int(floor(x)), Entity.pos(entityTag))
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return None
# The chunk is missing or malformed; the entity is silently dropped.
chunk.addEntity(entityTag)
chunk.dirty = True
def tileEntityAt(self, x, y, z):
chunk = self.getChunk(x >> 4, z >> 4)
return chunk.tileEntityAt(x, y, z)
def addTileEntity(self, tileEntityTag):
assert isinstance(tileEntityTag, nbt.TAG_Compound)
if 'x' not in tileEntityTag:
return
x, y, z = TileEntity.pos(tileEntityTag)
try:
chunk = self.getChunk(x >> 4, z >> 4)
except (ChunkNotPresent, ChunkMalformed):
return
# The chunk is missing or malformed; the tile entity is silently dropped.
chunk.addTileEntity(tileEntityTag)
chunk.dirty = True
def getEntitiesInBox(self, box):
entities = []
for chunk, slices, point in self.getChunkSlices(box):
entities += chunk.getEntitiesInBox(box)
return entities
def removeEntitiesInBox(self, box):
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeEntitiesInBox(box)
log.info("Removed {0} entities".format(count))
return count
def removeTileEntitiesInBox(self, box):
count = 0
for chunk, slices, point in self.getChunkSlices(box):
count += chunk.removeTileEntitiesInBox(box)
log.info("Removed {0} tile entities".format(count))
return count
# --- Chunk manipulation ---
def containsChunk(self, cx, cz):
if self._allChunks is not None:
return (cx, cz) in self._allChunks
if (cx, cz) in self._loadedChunkData:
return True
return self.worldFolder.containsChunk(cx, cz)
def containsPoint(self, x, y, z):
if y < 0 or y > 127:
return False
return self.containsChunk(x >> 4, z >> 4)
def createChunk(self, cx, cz):
if self.containsChunk(cx, cz):
raise ValueError("{0}:Chunk {1} already present!".format(self, (cx, cz)))
if self._allChunks is not None:
self._allChunks.add((cx, cz))
self._storeLoadedChunkData(AnvilChunkData(self, (cx, cz), create=True))
self._bounds = None
def createChunks(self, chunks):
i = 0
ret = []
for cx, cz in chunks:
i += 1
if not self.containsChunk(cx, cz):
ret.append((cx, cz))
self.createChunk(cx, cz)
assert self.containsChunk(cx, cz), "Just created {0} but it didn't take".format((cx, cz))
if i % 100 == 0:
log.info(u"Chunk {0}...".format(i))
log.info("Created {0} chunks.".format(len(ret)))
return ret
def createChunksInBox(self, box):
log.info(u"Creating {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
return self.createChunks(box.chunkPositions)
def deleteChunk(self, cx, cz):
self.worldFolder.deleteChunk(cx, cz)
if self._allChunks is not None:
self._allChunks.discard((cx, cz))
self._bounds = None
def deleteChunksInBox(self, box):
log.info(u"Deleting {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
i = 0
ret = []
for cx, cz in itertools.product(xrange(box.mincx, box.maxcx), xrange(box.mincz, box.maxcz)):
i += 1
if self.containsChunk(cx, cz):
self.deleteChunk(cx, cz)
ret.append((cx, cz))
assert not self.containsChunk(cx, cz), "Just deleted {0} but it didn't take".format((cx, cz))
if i % 100 == 0:
log.info(u"Chunk {0}...".format(i))
return ret
# --- Player and spawn manipulation ---
def playerSpawnPosition(self, player=None):
"""
xxx if player is None then it gets the default spawn position for the world
if player hasn't used a bed then it gets the default spawn position
"""
dataTag = self.root_tag["Data"]
if player is None:
playerSpawnTag = dataTag
else:
playerSpawnTag = self.getPlayerTag(player)
return [playerSpawnTag.get(i, dataTag[i]).value for i in ("SpawnX", "SpawnY", "SpawnZ")]
def setPlayerSpawnPosition(self, pos, player=None):
""" xxx if player is None then it sets the default spawn position for the world """
if player is None:
playerSpawnTag = self.root_tag["Data"]
else:
playerSpawnTag = self.getPlayerTag(player)
for name, val in zip(("SpawnX", "SpawnY", "SpawnZ"), pos):
playerSpawnTag[name] = nbt.TAG_Int(val)
def getPlayerPath(self, player):
assert player != "Player"
return os.path.join(self.playersFolder, "%s.dat" % player)
def getPlayerTag(self, player="Player"):
if player == "Player":
if player in self.root_tag["Data"]:
# single-player world
return self.root_tag["Data"]["Player"]
raise PlayerNotFound(player)
else:
playerFilePath = self.getPlayerPath(player)
if os.path.exists(playerFilePath):
# multiplayer world, found this player
playerTag = self.playerTagCache.get(playerFilePath)
if playerTag is None:
playerTag = nbt.load(playerFilePath)
self.playerTagCache[playerFilePath] = playerTag
return playerTag
else:
raise PlayerNotFound(player)
def getPlayerDimension(self, player="Player"):
playerTag = self.getPlayerTag(player)
if "Dimension" not in playerTag:
return 0
return playerTag["Dimension"].value
def setPlayerDimension(self, d, player="Player"):
playerTag = self.getPlayerTag(player)
if "Dimension" not in playerTag:
playerTag["Dimension"] = nbt.TAG_Int(0)
playerTag["Dimension"].value = d
def setPlayerPosition(self, pos, player="Player"):
posList = nbt.TAG_List([nbt.TAG_Double(p) for p in pos])
playerTag = self.getPlayerTag(player)
playerTag["Pos"] = posList
def getPlayerPosition(self, player="Player"):
playerTag = self.getPlayerTag(player)
posList = playerTag["Pos"]
pos = map(lambda x: x.value, posList)
return pos
def setPlayerOrientation(self, yp, player="Player"):
self.getPlayerTag(player)["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])
def getPlayerOrientation(self, player="Player"):
""" returns (yaw, pitch) """
yp = map(lambda x: x.value, self.getPlayerTag(player)["Rotation"])
y, p = yp
if p == 0:
p = 0.000000001
if p == 180.0:
p -= 0.000000001
yp = y, p
return array(yp)
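# The tiny pitch offsets above keep the returned orientation off exactly 0 or
# 180 degrees, presumably so downstream code converting (yaw, pitch) into a
# look vector never hits a degenerate straight-up/straight-down case.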
def setPlayerAbilities(self, gametype, player="Player"):
playerTag = self.getPlayerTag(player)
# Check for the Abilities tag. It will be missing in worlds from before
# Beta 1.9 Prerelease 5.
if 'abilities' not in playerTag:
playerTag['abilities'] = nbt.TAG_Compound()
# Assumes creative (1) is the only mode with these abilities set,
# which is true for now. Future game modes may not hold this to be
# true, however.
if gametype == 1:
playerTag['abilities']['instabuild'] = nbt.TAG_Byte(1)
playerTag['abilities']['mayfly'] = nbt.TAG_Byte(1)
playerTag['abilities']['invulnerable'] = nbt.TAG_Byte(1)
else:
playerTag['abilities']['flying'] = nbt.TAG_Byte(0)
playerTag['abilities']['instabuild'] = nbt.TAG_Byte(0)
playerTag['abilities']['mayfly'] = nbt.TAG_Byte(0)
playerTag['abilities']['invulnerable'] = nbt.TAG_Byte(0)
def setPlayerGameType(self, gametype, player="Player"):
playerTag = self.getPlayerTag(player)
# This annoyingly works differently between single- and multi-player.
if player == "Player":
self.GameType = gametype
self.setPlayerAbilities(gametype, player)
else:
playerTag['playerGameType'] = nbt.TAG_Int(gametype)
self.setPlayerAbilities(gametype, player)
def getPlayerGameType(self, player="Player"):
if player == "Player":
return self.GameType
else:
playerTag = self.getPlayerTag(player)
return playerTag["playerGameType"].value
def createPlayer(self, playerName):
if playerName == "Player":
playerTag = self.root_tag["Data"].setdefault(playerName, nbt.TAG_Compound())
else:
playerTag = nbt.TAG_Compound()
playerTag['Air'] = nbt.TAG_Short(300)
playerTag['AttackTime'] = nbt.TAG_Short(0)
playerTag['DeathTime'] = nbt.TAG_Short(0)
playerTag['Fire'] = nbt.TAG_Short(-20)
playerTag['Health'] = nbt.TAG_Short(20)
playerTag['HurtTime'] = nbt.TAG_Short(0)
playerTag['Score'] = nbt.TAG_Int(0)
playerTag['FallDistance'] = nbt.TAG_Float(0)
playerTag['OnGround'] = nbt.TAG_Byte(0)
playerTag["Inventory"] = nbt.TAG_List()
playerTag['Motion'] = nbt.TAG_List([nbt.TAG_Double(0) for i in range(3)])
playerTag['Pos'] = nbt.TAG_List([nbt.TAG_Double([0.5, 2.8, 0.5][i]) for i in range(3)])
playerTag['Rotation'] = nbt.TAG_List([nbt.TAG_Float(0), nbt.TAG_Float(0)])
if playerName != "Player":
if self.readonly:
raise IOError, "World is opened read only."
self.checkSessionLock()
playerTag.save(self.getPlayerPath(playerName))
class MCAlphaDimension (MCInfdevOldLevel):
def __init__(self, parentWorld, dimNo, create=False):
filename = parentWorld.worldFolder.getFolderPath("DIM" + str(int(dimNo)))
self.parentWorld = parentWorld
MCInfdevOldLevel.__init__(self, filename, create)
self.dimNo = dimNo
self.filename = parentWorld.filename
self.players = self.parentWorld.players
self.playersFolder = self.parentWorld.playersFolder
self.playerTagCache = self.parentWorld.playerTagCache
@property
def root_tag(self):
return self.parentWorld.root_tag
def __str__(self):
return u"MCAlphaDimension({0}, {1})".format(self.parentWorld, self.dimNo)
def loadLevelDat(self, create=False, random_seed=None, last_played=None):
pass
def preloadDimensions(self):
pass
def _create(self, *args, **kw):
pass
def acquireSessionLock(self):
pass
def checkSessionLock(self):
self.parentWorld.checkSessionLock()
dimensionNames = {-1: "Nether", 1: "The End"}
@property
def displayName(self):
return u"{0} ({1})".format(self.parentWorld.displayName,
self.dimensionNames.get(self.dimNo, "Dimension %d" % self.dimNo))
def saveInPlace(self, saveSelf=False):
"""saving the dimension will save the parent world, which will save any
other dimensions that need saving. the intent is that all of them can
stay loaded at once for fast switching """
if saveSelf:
MCInfdevOldLevel.saveInPlace(self)
else:
self.parentWorld.saveInPlace()
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/util/pymclevel/infiniteworld.py
|
Python
|
agpl-3.0
| 61,606
|
#!/usr/bin/env python2
import sys
import os
import gettext
import glob
from optparse import OptionParser
import shutil
import gi
gi.require_version("Gtk", "3.0")
gi.require_version("CMenu", "3.0")
from gi.repository import GLib, Gtk, Gio, CMenu, GdkPixbuf
sys.path.insert(0, '/usr/share/cinnamon/cinnamon-menu-editor')
from cme import util
sys.path.insert(0, '/usr/share/cinnamon/cinnamon-settings')
from bin import XletSettingsWidgets
# i18n
gettext.install("cinnamon", "/usr/share/locale")
# i18n for menu item
_ = gettext.gettext
home = os.path.expanduser("~")
PANEL_LAUNCHER_PATH = os.path.join(home, ".cinnamon", "panel-launchers")
EXTENSIONS = (".png", ".xpm", ".svg")
def escape_space(string):
return string.replace(" ", "\\ ")  # prefix spaces with a literal backslash
def try_icon_name(filename):
# Detect if the user picked an icon, and make
# it into an icon name.
if not filename.endswith(EXTENSIONS):
return filename
noext_filename = filename[:-4]
theme = Gtk.IconTheme.get_default()
resolved_path = None
for path in theme.get_search_path():
if noext_filename.startswith(path):
resolved_path = noext_filename[len(path):].lstrip(os.sep)
break
if resolved_path is None:
return filename
parts = resolved_path.split(os.sep)
# icon-theme/size/category/icon
if len(parts) != 4:
return filename
return parts[3]
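# Worked example (illustrative; assumes /usr/share/icons is on the theme's
# search path): "/usr/share/icons/hicolor/48x48/apps/foo.png" strips to
# "hicolor/48x48/apps/foo", which splits into exactly four parts, so the
# function returns the bare icon name "foo".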
def get_icon_string(image):
filename = image._file
if filename is not None:
return try_icon_name(filename)
return image._icon_name
def strip_extensions(icon):
if icon.endswith(EXTENSIONS):
return icon[:-4]
else:
return icon
def set_icon_string(image, icon):
if GLib.path_is_absolute(icon):
image._file = icon
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(icon, 64, 64)
if pixbuf is not None:
image.set_from_pixbuf(pixbuf)
else:
image._icon_name = strip_extensions(icon)
image.set_from_icon_name(strip_extensions(icon), Gtk.IconSize.BUTTON)
def ask(msg):
dialog = Gtk.MessageDialog(None,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION,
Gtk.ButtonsType.YES_NO,
None)
dialog.set_default_size(400, 200)
dialog.set_markup(msg)
dialog.show_all()
response = dialog.run()
dialog.destroy()
return response == Gtk.ResponseType.YES
DESKTOP_GROUP = GLib.KEY_FILE_DESKTOP_GROUP
class IconPicker(object):
def __init__(self, dialog, button, image):
self.dialog = dialog
self.button = button
self.button.connect('clicked', self.pick_icon)
self.image = image
def pick_icon(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose an icon"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
chooser.add_shortcut_folder("/usr/share/pixmaps")
chooser.add_shortcut_folder("/usr/share/icons")
fn = get_icon_string(self.image)
if fn:
if GLib.path_is_absolute(fn):
chooser.set_filename(fn)
else:
theme = Gtk.IconTheme.get_default()
icon_info = theme.lookup_icon(fn, 64, 0)
icon_info_fn = icon_info.get_filename() if icon_info is not None else None
if icon_info_fn:
chooser.set_filename(icon_info_fn)
file_filter = Gtk.FileFilter()
file_filter.add_pixbuf_formats()
chooser.set_filter(file_filter)
preview = Gtk.Image()
chooser.set_preview_widget(preview)
chooser.connect("update-preview", self.update_icon_preview_cb, preview)
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
set_icon_string (self.image, chooser.get_filename())
chooser.destroy()
def update_icon_preview_cb(self, chooser, preview):
filename = chooser.get_preview_filename()
if filename is None:
return
chooser.set_preview_widget_active(False)
if os.path.isfile(filename):
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(filename, 128, 128)
if pixbuf is not None:
preview.set_from_pixbuf(pixbuf)
chooser.set_preview_widget_active(True)
class ItemEditor(object):
ui_file = None
def __init__(self, item_path=None, callback=None, destdir=None):
self.builder = Gtk.Builder()
self.builder.add_from_file(self.ui_file)
self.callback = callback
self.destdir = destdir
self.dialog = self.builder.get_object('editor')
self.dialog.connect('response', self.on_response)
icon = self.builder.get_object('icon-image')
icon._file = None
icon._icon_name = None
self.build_ui()
self.item_path = item_path
self.load()
self.check_custom_path()
self.resync_validity()
def build_ui(self):
raise NotImplementedError()
def check_custom_path(self):
raise NotImplementedError()
def sync_widgets(self, name_valid, exec_valid):
if name_valid:
self.builder.get_object('name-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'ok')
self.builder.get_object('name-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("Valid name"))
else:
self.builder.get_object('name-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'stop')
self.builder.get_object('name-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("The name cannot be empty."))
if exec_valid:
self.builder.get_object('exec-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'ok')
self.builder.get_object('exec-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("Valid executable"))
else:
self.builder.get_object('exec-entry').set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, 'stop')
self.builder.get_object('exec-entry').set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
_("The executable is not valid. It cannot be empty and spaces in the path must be escaped with backslash (\\)."))
self.builder.get_object('ok').set_sensitive(name_valid and exec_valid)
def validate_exec_line(self, string):
try:
success, parsed = GLib.shell_parse_argv(string)
if GLib.find_program_in_path(parsed[0]) or ((not os.path.isdir(parsed[0])) and os.access(parsed[0], os.X_OK)):
return True
except:
pass
return False
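# Examples (illustrative): "firefox --new-window" parses to ["firefox", ...]
# and firefox is found on PATH, so this returns True; an empty string makes
# GLib.shell_parse_argv raise, and "no-such-binary-xyz" is neither on PATH
# nor an executable file, so both of those return False.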
def get_keyfile_edits(self):
raise NotImplementedError()
def set_text(self, ctl, name):
try:
val = self.keyfile.get_string(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
self.builder.get_object(ctl).set_text(val)
def set_check(self, ctl, name):
try:
val = self.keyfile.get_boolean(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
self.builder.get_object(ctl).set_active(val)
def set_icon(self, ctl, name):
try:
val = self.keyfile.get_string(DESKTOP_GROUP, name)
except GLib.GError:
pass
else:
set_icon_string(self.builder.get_object(ctl), val)
def load(self):
self.keyfile = GLib.KeyFile()
path = self.item_path or ""
try:
self.keyfile.load_from_file(path, util.KEY_FILE_FLAGS)
except GLib.GError:
pass
def save(self):
util.fillKeyFile(self.keyfile, self.get_keyfile_edits())
contents, length = self.keyfile.to_data()
need_exec = False
if self.destdir is not None:
self.item_path = os.path.join(self.destdir, self.builder.get_object('name-entry').get_text() + ".desktop")
need_exec = True
try:
with open(self.item_path, 'w') as f:
f.write(contents)
if need_exec:
os.chmod(self.item_path, 0o755)
except IOError:
if ask(_("Cannot create the launcher at this location. Add to the desktop instead?")):
self.destdir = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)
self.save()
def run(self):
self.dialog.present()
def on_response(self, dialog, response):
if response == Gtk.ResponseType.OK:
self.save()
self.callback(True, self.item_path)
else:
self.callback(False, self.item_path)
self.dialog.destroy()
class LauncherEditor(ItemEditor):
ui_file = '/usr/share/cinnamon/cinnamon-desktop-editor/launcher-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('exec-browse').connect('clicked', self.pick_exec)
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
self.builder.get_object('exec-entry').connect('changed', self.resync_validity)
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
exec_text = self.builder.get_object('exec-entry').get_text().strip()
name_valid = name_text != ""
exec_valid = self.validate_exec_line(exec_text)
self.sync_widgets(name_valid, exec_valid)
def load(self):
super(LauncherEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('exec-entry', "Exec")
self.set_text('comment-entry', "Comment")
self.set_check('terminal-check', "Terminal")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Exec=self.builder.get_object('exec-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Terminal=self.builder.get_object('terminal-check').get_active(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Application")
def pick_exec(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose a command"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
self.builder.get_object('exec-entry').set_text(escape_space(chooser.get_filename()))
chooser.destroy()
def check_custom_path(self):
pass
class DirectoryEditor(ItemEditor):
ui_file = '/usr/share/cinnamon/cinnamon-desktop-editor/directory-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
valid = (name_text != "")
self.builder.get_object('ok').set_sensitive(valid)
def load(self):
super(DirectoryEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('comment-entry', "Comment")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Directory")
def check_custom_path(self):
pass
class CinnamonLauncherEditor(ItemEditor):
ui_file = '/usr/share/cinnamon/cinnamon-desktop-editor/launcher-editor.ui'
def build_ui(self):
self.icon_picker = IconPicker(self.dialog,
self.builder.get_object('icon-button'),
self.builder.get_object('icon-image'))
self.builder.get_object('exec-browse').connect('clicked', self.pick_exec)
self.builder.get_object('name-entry').connect('changed', self.resync_validity)
self.builder.get_object('exec-entry').connect('changed', self.resync_validity)
def check_custom_path(self):
dir = Gio.file_new_for_path(PANEL_LAUNCHER_PATH)
if not dir.query_exists(None):
dir.make_directory_with_parents(None)
if self.item_path is None or "cinnamon-custom-launcher" not in self.item_path:
i = 1
while True:
name = os.path.join(PANEL_LAUNCHER_PATH, 'cinnamon-custom-launcher-' + str(i) + '.desktop')
file = Gio.file_parse_name(name)
if not file.query_exists(None):
break
i += 1
self.item_path = name
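# Illustrative: on a fresh setup the loop above picks
# cinnamon-custom-launcher-1.desktop; if launchers 1..3 already exist in
# the panel-launchers folder, it settles on cinnamon-custom-launcher-4.desktop.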
def resync_validity(self, *args):
name_text = self.builder.get_object('name-entry').get_text().strip()
exec_text = self.builder.get_object('exec-entry').get_text().strip()
name_valid = name_text != ""
exec_valid = self.validate_exec_line(exec_text)
self.sync_widgets(name_valid, exec_valid)
def load(self):
super(CinnamonLauncherEditor, self).load()
self.set_text('name-entry', "Name")
self.set_text('exec-entry', "Exec")
self.set_text('comment-entry', "Comment")
self.set_check('terminal-check', "Terminal")
self.set_icon('icon-image', "Icon")
def get_keyfile_edits(self):
return dict(Name=self.builder.get_object('name-entry').get_text(),
Exec=self.builder.get_object('exec-entry').get_text(),
Comment=self.builder.get_object('comment-entry').get_text(),
Terminal=self.builder.get_object('terminal-check').get_active(),
Icon=get_icon_string(self.builder.get_object('icon-image')),
Type="Application")
def pick_exec(self, button):
chooser = Gtk.FileChooserDialog(title=_("Choose a command"),
parent=self.dialog,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
response = chooser.run()
if response == Gtk.ResponseType.ACCEPT:
self.builder.get_object('exec-entry').set_text(escape_space(chooser.get_filename()))
chooser.destroy()
class Main:
def __init__(self):
parser = OptionParser()
parser.add_option("-o", "--original", dest="original_desktop_file", help="Path of original .desktop file", metavar="ORIG_FILE")
parser.add_option("-d", "--directory", dest="destination_directory", help="Destination directory of the new launcher", metavar="DEST_DIR")
parser.add_option("-f", "--file", dest="desktop_file", help="Name of desktop file (i.e. gnome-terminal.desktop)", metavar="DESKTOP_NAME")
parser.add_option("-m", "--mode", dest="mode", default=None, help="Mode to run in: launcher, directory, panel-launcher or nemo-launcher")
(options, args) = parser.parse_args()
if not options.mode:
parser.error("You must select a mode to run in")
if options.mode in ("directory", "launcher") and not options.original_desktop_file:
parser.error("directory and launcher modes must be accompanied by the -o argument")
if options.mode == "nemo-launcher" and not options.destination_directory:
parser.error("nemo-launcher mode must be accompanied by the -d argument")
if options.mode == "cinnamon-launcher" and len(args) < 3:
parser.error("cinnamon-launcher mode must have the following syntax:\n"
"cinnamon-desktop-editor -mcinnamon-launcher [-ffoo.desktop] <uuid> <instance-id> <json-path>")
self.tree = CMenu.Tree.new("cinnamon-applications.menu", CMenu.TreeFlags.INCLUDE_NODISPLAY)
if not self.tree.load_sync():
raise ValueError("can not load menu tree")
self.mode = options.mode
self.orig_file = options.original_desktop_file
self.desktop_file = options.desktop_file
self.dest_dir = options.destination_directory
if options.mode == "cinnamon-launcher":
self.uuid = args[0]
self.iid = args[1]
self.json_path = args[2]
if self.desktop_file is not None:
self.get_desktop_path()
if self.mode == "directory":
editor = DirectoryEditor(self.orig_file, self.directory_cb)
editor.dialog.show_all()
elif self.mode == "launcher":
editor = LauncherEditor(self.orig_file, self.launcher_cb)
editor.dialog.show_all()
elif self.mode == "cinnamon-launcher":
editor = CinnamonLauncherEditor(self.orig_file, self.panel_launcher_cb)
editor.dialog.show_all()
elif self.mode == "nemo-launcher":
editor = LauncherEditor(self.orig_file, self.nemo_launcher_cb, self.dest_dir)
editor.dialog.show_all()
else:
print "Invalid args"
def directory_cb(self, success, dest_path):
self.end()
def launcher_cb(self, success, dest_path):
self.end()
def panel_launcher_cb(self, success, dest_path):
if success:
factory = XletSettingsWidgets.Factory(self.json_path, self.iid, False, self.uuid)
launchers = factory.settings.get_value("launcherList")
if self.desktop_file is None:
launchers.append(os.path.split(dest_path)[1])
else:
i = launchers.index(self.desktop_file)
if i >= 0:
del launchers[i]
launchers.insert(i, os.path.split(dest_path)[1])
factory.settings.set_value("launcherList", launchers)
if self.desktop_file is None:
self.ask_menu_launcher(dest_path)
self.end()
def nemo_launcher_cb(self, success, dest_path):
if success:
self.ask_menu_launcher(dest_path)
self.end()
def ask_menu_launcher(self, dest_path):
if ask(_("Would you like to add this launcher to the menu also? It will be placed in the Other category initially.")):
new_file_path = os.path.join(util.getUserItemPath(), os.path.split(dest_path)[1])
shutil.copy(dest_path, new_file_path)
def get_desktop_path(self):
self.search_menu_sys()
if self.orig_file is None:
panel_launchers = glob.glob(os.path.join(PANEL_LAUNCHER_PATH, "*.desktop"))
for launcher in panel_launchers:
if os.path.split(launcher)[1] == self.desktop_file:
self.orig_file = launcher
def search_menu_sys(self, parent=None):
if parent is None:
parent = self.tree.get_root_directory()
item_iter = parent.iter()
item_type = item_iter.next()
while item_type != CMenu.TreeItemType.INVALID:
if item_type == CMenu.TreeItemType.DIRECTORY:
item = item_iter.get_directory()
self.search_menu_sys(item)
elif item_type == CMenu.TreeItemType.ENTRY:
item = item_iter.get_entry()
if item.get_desktop_file_id() == self.desktop_file:
self.orig_file = item.get_desktop_file_path()
item_type = item_iter.next()
def end(self):
Gtk.main_quit()
if __name__ == "__main__":
Gtk.Window.set_default_icon_name('gnome-panel-launcher')
Main()
Gtk.main()
|
RavetcoFX/Cinnamon
|
files/usr/share/cinnamon/cinnamon-desktop-editor/cinnamon-desktop-editor.py
|
Python
|
gpl-2.0
| 20,812
|
from django.db import models
from django.db.models import Sum
from django.utils.translation import ugettext as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from smartmin.models import SmartModel
from model_utils import Choices
from model_utils.models import TimeStampedModel
from django_fsm import FSMField, transition
class Breed(SmartModel):
name = models.CharField(max_length=30)
def __unicode__(self):
return self.name
class Color(SmartModel):
# http://www.sss-mag.com/fernhill/cowcolor.html
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class Breeder(SmartModel):
name = models.CharField(max_length=30)
def __unicode__(self):
return self.name
class Sire(SmartModel):
name = models.CharField(max_length=30, blank=False)
code = models.CharField(max_length=10, blank=True)
breed = models.ForeignKey(Breed, null=True, blank=True)
birth_date = models.DateField(null=True, blank=True)
breeder = models.ForeignKey(Breeder, null=True, blank=True, related_name='sire_breeder')
def __unicode__(self):
return self.name
class Dam(SmartModel):
name = models.CharField(max_length=30)
breed = models.ForeignKey(Breed, null=True, blank=True)
code = models.CharField(max_length=10, blank=True)
birth_date = models.DateField(null=True, blank=True)
animal = models.ForeignKey('animals.Animal', null=True, blank=True, related_name='dam_animal')
breeder = models.ForeignKey(Breeder, null=True, blank=True, related_name='dam_breeder')
def __unicode__(self):
return self.name
class Animal(SmartModel):
state = FSMField(default='open')
@transition(field=state, source='*', target='open')
def open(self):
pass
@transition(field=state, source='*', target='served')
def served(self):
pass
@transition(field=state, source='*', target='pregnant')
def pregnant(self):
pass
@transition(field=state, source='*', target='lactating')
def lactating(self):
pass
@transition(field=state, source='*', target='disposed')
def disposed(self):
pass
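# Illustrative lifecycle, assuming django-fsm's usual semantics (calling a
# @transition-decorated method moves the FSMField when the source matches):
#   animal = Animal.objects.get(pk=1)   # state == 'open'
#   animal.served(); animal.save()      # state == 'served'
#   animal.pregnant(); animal.save()    # state == 'pregnant'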
# Choices
SEX_CHOICES = Choices((None, '------'), ('female', _('Female')), ('male', _('Male')), )
# Identification
ear_tag = models.CharField(max_length=30, blank=False)
name = models.CharField(max_length=30, blank=False)
# Description
color = models.ForeignKey(Color, null=True, blank=True)
sex = models.CharField(choices=SEX_CHOICES, max_length=20)
breed = models.ForeignKey(Breed, null=True, blank=True)
sire = models.ForeignKey(Sire, null=True, blank=True, related_name='sire')
dam = models.ForeignKey(Dam, null=True, blank=True, related_name='dam')
# Calfhood
birth_date = models.DateField(null=True, blank=True)
birth_weight = models.IntegerField(null=True, blank=True)
weaning_date = models.DateField(null=True, blank=True)
weaning_weight = models.IntegerField(null=True, blank=True)
yearling_date = models.DateField(null=True, blank=True)
yearling_weight = models.IntegerField(null=True, blank=True)
farm = models.ForeignKey('users.User', null=True, blank=True)
def __unicode__(self):
return '%s-%s' % (self.ear_tag, self.name)
@property
def date_of_first_service(self):
try:
date = self.animal_services.earliest('date').date
except Service.DoesNotExist:
date = ''
return date
@property
def number_of_services(self):
return self.animal_services.count()
@property
def number_of_successful_services(self):
return self.pregnancy_checks.filter(result=PregnancyCheck.RESULT_CHOICES.pregnant).exclude(service=None).count()
@property
def number_of_failed_services(self):
return self.pregnancy_checks.filter(result=PregnancyCheck.RESULT_CHOICES.open).exclude(service=None).count()
@property
def all_time_production(self):
return self.milkproduction.aggregate(Sum('amount'))['amount__sum']
@receiver(post_save, sender=Animal)
def add_dam(sender, **kwargs):
animal = kwargs['instance']
if animal.sex == Animal.SEX_CHOICES.female:
Dam.objects.create(animal=animal, name=animal.name, breed=animal.breed, birth_date=animal.birth_date, created_by=animal.created_by,
modified_by=animal.modified_by)
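# Note (assumption about intent): post_save also fires on updates, so as
# written every save of a female Animal creates another Dam row; checking
# kwargs.get('created') would restrict this to the initial save.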
class MilkProduction(SmartModel):
# Choices
TIME_CHOICES = Choices(('am', _('Morning')), ('pm', _('Evening')))
animal = models.ForeignKey(Animal, null=False, blank=False, related_name='milkproduction')
time = models.CharField(choices=TIME_CHOICES, max_length=10, null=False, blank=False)
amount = models.DecimalField(max_digits=5, decimal_places=2)
butterfat = models.DecimalField(max_digits=5, decimal_places=3, null=True)
date = models.DateField()
class Service(SmartModel):
# Choices
METHOD_CHOICES = Choices(('artificial_insemination', _('Artificial Insemination')), ('natural_service', _('Natural Service')),)
animal = models.ForeignKey(Animal, null=False, blank=False, related_name='animal_services')
method = models.CharField(choices=METHOD_CHOICES, max_length=30, default=METHOD_CHOICES.artificial_insemination, blank=False)
sire = models.ForeignKey(Sire, null=False, blank=False, related_name='sire_services')
date = models.DateField()
notes = models.CharField(max_length=200, blank=True)
def __unicode__(self):
return 'Sire: ' + str(self.sire)
class PregnancyCheck(SmartModel):
# Choices
RESULT_CHOICES = Choices(('pregnant', _('Pregnant')), ('open', _('Open')),)
CHECK_METHOD_CHOICES = Choices(('palpation', _('Palpation')), ('ultrasound', _('Ultrasound')), ('observation', _('Observation')), ('blood', _('Blood')))
service = models.ForeignKey(Service, null=True, blank=True, related_name='pregnancy_checks')
animal = models.ForeignKey(Animal, null=False, blank=False, related_name='pregnancy_checks')
result = models.CharField(choices=RESULT_CHOICES, max_length=20)
check_method = models.CharField(choices=CHECK_METHOD_CHOICES, max_length=20)
date = models.DateField()
class LactationPeriod(SmartModel):
animal = models.ForeignKey(Animal, null=False, blank=False, related_name='animal_lactation_periods')
calves = models.ManyToManyField(Animal, null=False, blank=False, related_name='calf_lactation_periods')
start_date = models.DateField()
end_date = models.DateField(null=True, blank=True)
|
savioabuga/phoenix
|
phoenix/animals/models.py
|
Python
|
bsd-3-clause
| 6,605
|
#!/usr/bin/python
import urllib2
import sys
def print_http_response_header(url):
try:
response = urllib2.urlopen(url)
for key, value in response.info().items():
print key + ' => ' + value
except (urllib2.URLError, ValueError), e:
print 'error fetching %s: %s' % (url, e)
def main():
if len(sys.argv) < 2:
print 'usage: %s <url>' % sys.argv[0]
sys.exit(1)
print_http_response_header(sys.argv[1])
if __name__ == '__main__':
main()
|
ajitabhpandey/learn-programming
|
python/httpResponseHeader.py
|
Python
|
gpl-2.0
| 319
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequence feature extraction class for common feature extractors to preprocess sequences.
"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .file_utils import (
PaddingStrategy,
TensorType,
_is_tensorflow,
_is_torch,
is_tf_available,
is_torch_available,
to_numpy,
)
from .utils import logging
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
"""
This is a general feature extraction class for speech recognition.
Args:
feature_size (`int`):
The feature dimension of the extracted features.
sampling_rate (`int`):
The sampling rate at which the audio files should be digitalized, expressed in hertz (Hz).
padding_value (`float`):
The value that is used to fill the padding values / vectors.
"""
def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.padding_side = kwargs.pop("padding_side", "right")
self.return_attention_mask = kwargs.pop("return_attention_mask", True)
super().__init__(**kwargs)
def pad(
self,
processed_features: Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
truncation: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
"""
Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the
max sequence length in the batch.
Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`,
`self.padding_value`)
<Tip>
If the `processed_features` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, you will lose the specific device of your tensors however.
</Tip>
Args:
processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]` or `List[Dict[str, List[float]]]`):
Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of
input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str,
List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `List[float]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
see the note above for the return type.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
processed_features = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature` to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(processed_features.keys())}"
)
required_input = processed_features[self.model_input_names[0]]
return_attention_mask = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if not required_input:
if return_attention_mask:
processed_features["attention_mask"] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
index = 0
while len(required_input[index]) == 0:
index += 1
if index < len(required_input):
first_element = required_input[index][0]
if return_tensors is None:
if is_tf_available() and _is_tensorflow(first_element):
return_tensors = "tf"
elif is_torch_available() and _is_torch(first_element):
return_tensors = "pt"
elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
return_tensors = "np"
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
f"Should be one of a python, numpy, pytorch or tensorflow object."
)
for key, value in processed_features.items():
if isinstance(value[0], (int, float)):
processed_features[key] = to_numpy(value)
else:
processed_features[key] = [to_numpy(v) for v in value]
# Convert padding_strategy in PaddingStrategy
padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
required_input = processed_features[self.model_input_names[0]]
batch_size = len(required_input)
if not all(len(v) == batch_size for v in processed_features.values()):
raise ValueError("Some items in the output dictionary have a different batch size than others.")
truncated_inputs = []
for i in range(batch_size):
inputs = dict((k, v[i]) for k, v in processed_features.items())
# truncation
inputs_slice = self._truncate(
inputs,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
truncation=truncation,
)
truncated_inputs.append(inputs_slice)
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
# padding
outputs = self._pad(
truncated_inputs[i],
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
if value.dtype is np.dtype(np.float64):
value = value.astype(np.float32)
batch_outputs[key].append(value)
return BatchFeature(batch_outputs, tensor_type=return_tensors)
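# Illustrative usage, assuming a subclass instance `fe` that defines
# model_input_names = ["input_values"] (the base class does not set it):
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                  padding=True, return_tensors="np")
# padding=True resolves to LONGEST, max_length becomes 3, and the result holds
# float32 input_values of shape (2, 3) with attention_mask [[1,1,1],[1,0,0]].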
def _pad(
self,
processed_features: Union[Dict[str, np.ndarray], BatchFeature],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad inputs (on left/right and up to predefined length or max length in the batch)
Args:
processed_features:
Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
max_length: maximum length of the returned list and optionally padding length (see below)
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The feature_extractor padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
required_input = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
processed_features["attention_mask"] = np.pad(
processed_features["attention_mask"], (0, difference)
)
padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
processed_features[self.model_input_names[0]] = np.pad(
required_input, padding_shape, "constant", constant_values=self.padding_value
)
elif self.padding_side == "left":
if return_attention_mask:
processed_features["attention_mask"] = np.pad(
processed_features["attention_mask"], (difference, 0)
)
padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
processed_features[self.model_input_names[0]] = np.pad(
required_input, padding_shape, "constant", constant_values=self.padding_value
)
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return processed_features
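# Worked example (illustrative): feature_size=1, padding_side="right",
# padding_value=0.0, input [a, b], max_length=4 -> difference is 2, so
# attention_mask [1, 1] becomes [1, 1, 0, 0] and the input [a, b, 0.0, 0.0].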
def _truncate(
self,
processed_features: Union[Dict[str, np.ndarray], BatchFeature],
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
truncation: Optional[bool] = None,
):
"""
Truncate inputs to predefined length or max length in the batch
Args:
processed_features:
Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
max_length: maximum length of the returned list and optionally padding length (see below)
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
truncation:
(optional) Activates truncation to cut input sequences longer than `max_length` to `max_length`.
"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
required_input = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_truncated = len(required_input) > max_length
if needs_to_be_truncated:
processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
return processed_features
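# Worked example of the pad_to_multiple_of rounding above (illustrative):
# max_length=50, pad_to_multiple_of=16 -> ((50 // 16) + 1) * 16 == 64, so
# sequences longer than 64 are cut to 64 rather than to 50.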
def _get_padding_strategies(self, padding=False, max_length=None):
"""
Find the correct padding strategy
"""
# Get padding strategy
if padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
elif isinstance(padding, PaddingStrategy):
padding_strategy = padding
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that" f" max_length is defined"
)
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. "
"Please select a value to use as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
)
return padding_strategy
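# Mapping recap (from the branches above): padding=True -> LONGEST,
# padding="max_length" -> MAX_LENGTH (which requires max_length), an explicit
# PaddingStrategy passes through unchanged, and padding=False -> DO_NOT_PAD.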
|
huggingface/transformers
|
src/transformers/feature_extraction_sequence_utils.py
|
Python
|
apache-2.0
| 18,114
|
import pygame.camera
import pygame.image
from datetime import datetime
from datetime import timedelta
import signal # Import Signals
import sys
class EventTimer(object):
def __init__(self, hours = 0, minutes = 0, seconds = 0):
self.__delta = timedelta(hours = hours, minutes = minutes, seconds = seconds)
self.__ftime = True
def run(self):
self.__inTimer = datetime.now()
def ready(self):
if self.__ftime:
self.__ftime = False
self.__inTimer = datetime.now()
return True
chk_time = datetime.now()
if chk_time - self.__inTimer >= self.__delta:
self.__inTimer = datetime.now()
return True
else:
return False
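# Illustrative behaviour: with EventTimer(minutes=1), the first ready() call
# returns True immediately (first-time flag), after which ready() returns
# True at most once per elapsed minute; run() just resets the reference time.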
AntiSpam = EventTimer(minutes = 1)
AntiSpam.run()
def takePhoto():
pygame.camera.init()
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
cam.start()
img = cam.get_image()
pygame.image.save(img, "photo.jpg")
cam.stop()
pygame.camera.quit()
def signalHandler(signal, frame):
print 'You pressed Ctrl+C!'
sys.exit(0)
signal.signal(signal.SIGINT, signalHandler)  # register the handler once, not per iteration
while True:
if AntiSpam.ready():
print "Photo taked - {}".format(datetime.now())
takePhoto()
|
Azrrael-exe/RPiPhotoShoot
|
Tests/takePhoto.py
|
Python
|
mit
| 1,169
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleangenre
from resources.lib.modules import cleantitle
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import metacache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
import os,sys,re,json,urllib,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
control.moderator()
class movies:
def __init__(self):
self.list = []
self.imdb_link = 'http://www.imdb.com'
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.trakt_user = control.setting('trakt.user').strip()
self.imdb_user = control.setting('imdb.user').replace('ur', '')
self.tm_user = control.setting('tm.user')
self.fanart_tv_user = control.setting('fanart.tv.user')
self.user = str(control.setting('fanart.tv.user')) + str(control.setting('tm.user'))
self.lang = control.apiLanguage()['trakt']
self.search_link = 'http://api-v2launch.trakt.tv/search?type=movie&limit=20&page=1&query='
self.imdb_info_link = 'http://www.omdbapi.com/?i=%s&plot=full&r=json'
self.trakt_info_link = 'http://api-v2launch.trakt.tv/movies/%s'
self.trakt_lang_link = 'http://api-v2launch.trakt.tv/movies/%s/translations/%s'
self.fanart_tv_art_link = 'http://webservice.fanart.tv/v3/movies/%s'
self.fanart_tv_level_link = 'http://webservice.fanart.tv/v3/level'
self.tm_art_link = 'http://api.themoviedb.org/3/movie/%s/images?api_key=' + self.tm_user
self.tm_img_link = 'https://image.tmdb.org/t/p/w%s%s'
self.persons_link = 'http://www.imdb.com/search/name?count=100&name='
self.personlist_link = 'http://www.imdb.com/search/name?count=100&gender=male,female'
self.popular_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1'
self.views_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&sort=num_votes,desc&count=40&start=1'
self.featured_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=1000,&production_status=released&release_date=date[365],date[60]&sort=moviemeter,asc&count=40&start=1'
self.person_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&role=%s&sort=year,desc&count=40&start=1'
self.genre_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&release_date=date[730],date[30]&genres=%s&sort=moviemeter,asc&count=40&start=1'
self.language_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&languages=%s&sort=moviemeter,asc&count=40&start=1'
self.certification_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&certificates=us:%s&sort=moviemeter,asc&count=40&start=1'
self.year_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&num_votes=100,&production_status=released&year=%s,%s&sort=moviemeter,asc&count=40&start=1'
self.boxoffice_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&sort=boxoffice_gross_us,desc&count=40&start=1'
self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&production_status=released&groups=oscar_best_picture_winners&sort=year,desc&count=40&start=1'
self.theaters_link = 'http://www.imdb.com/search/title?title_type=feature&num_votes=1000,&release_date=date[365],date[0]&sort=release_date_us,desc&count=40&start=1'
self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?limit=40&page=1'
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/me/collection/movies'
self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/me/watchlist/movies'
self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/movies?limit=40&page=1'
self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdblist2_link = 'http://www.imdb.com/list/%s/?view=detail&sort=created:desc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=alpha,asc' % self.imdb_user
self.imdbwatchlist2_link = 'http://www.imdb.com/user/ur%s/watchlist?sort=date_added,desc' % self.imdb_user
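# The '%s' placeholders in the templates above are filled in later via
# string formatting; an illustrative (hypothetical) expansion:
#   self.trakt_lang_link % ('tt0068646', 'de')
#   -> 'http://api-v2launch.trakt.tv/movies/tt0068646/translations/de'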
def get(self, url, idx=True):
try:
try: url = getattr(self, url + '_link')
except: pass
try: u = urlparse.urlparse(url).netloc.lower()
except: pass
if u in self.trakt_link and '/users/' in url:
try:
if url == self.trakthistory_link: raise Exception()
if not '/users/me/' in url: raise Exception()
if trakt.getActivity() > cache.timeout(self.trakt_list, url, self.trakt_user): raise Exception()
self.list = cache.get(self.trakt_list, 720, url, self.trakt_user)
except:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
if '/users/me/' in url and not '/watchlist/' in url:
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower()))
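# (illustrative) the sort key above strips a leading article before
# comparing, so 'The Abyss' sorts under 'abyss' rather than 'the abyss'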
if idx == True: self.worker()
elif u in self.trakt_link and self.search_link in url:
self.list = cache.get(self.trakt_list, 1, url, self.trakt_user)
if idx == True: self.worker(level=0)
elif u in self.trakt_link:
self.list = cache.get(self.trakt_list, 24, url, self.trakt_user)
if idx == True: self.worker() ; self.list = [i for i in self.list if not i['poster'] == '0']
elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
self.list = cache.get(self.imdb_list, 0, url)
if idx == True: self.worker()
elif u in self.imdb_link:
self.list = cache.get(self.imdb_list, 24, url)
if idx == True: self.worker() ; self.list = [i for i in self.list if not i['poster'] == '0']
if idx == True: self.movieDirectory(self.list)
return self.list
except:
pass
def widget(self):
setting = control.setting('movie.widget')
if setting == '2':
self.get(self.trending_link)
elif setting == '3':
self.get(self.popular_link)
elif setting == '4':
self.get(self.theaters_link)
else:
self.get(self.featured_link)
def search(self):
try:
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
url = self.search_link + urllib.quote_plus(q)
url = '%s?action=moviePage&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
except:
return
def person(self):
try:
control.idle()
t = control.lang(32010).encode('utf-8')
k = control.keyboard('', t) ; k.doModal()
q = k.getText() if k.isConfirmed() else None
if (q == None or q == ''): return
url = self.persons_link + urllib.quote_plus(q)
url = '%s?action=moviePersons&url=%s' % (sys.argv[0], urllib.quote_plus(url))
control.execute('Container.Update(%s)' % url)
except:
return
def genres(self):
genres = [
('Action', 'action'),
('Adventure', 'adventure'),
('Animation', 'animation'),
('Biography', 'biography'),
('Comedy', 'comedy'),
('Crime', 'crime'),
('Drama', 'drama'),
('Family', 'family'),
('Fantasy', 'fantasy'),
('History', 'history'),
('Horror', 'horror'),
('Music', 'music'),
('Musical', 'musical'),
('Mystery', 'mystery'),
('Romance', 'romance'),
('Science Fiction', 'sci_fi'),
('Sport', 'sport'),
('Thriller', 'thriller'),
('War', 'war'),
('Western', 'western')
]
for i in genres: self.list.append({'name': cleangenre.lang(i[0], self.lang), 'url': self.genre_link % i[1], 'image': 'genres.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def languages(self):
languages = [
('Arabic', 'ar'),
('Bulgarian', 'bg'),
('Chinese', 'zh'),
('Croatian', 'hr'),
('Dutch', 'nl'),
('English', 'en'),
('Finnish', 'fi'),
('French', 'fr'),
('German', 'de'),
('Greek', 'el'),
('Hebrew', 'he'),
('Hindi', 'hi'),
('Hungarian', 'hu'),
('Icelandic', 'is'),
('Italian', 'it'),
('Japanese', 'ja'),
('Korean', 'ko'),
('Norwegian', 'no'),
('Persian', 'fa'),
('Polish', 'pl'),
('Portuguese', 'pt'),
('Punjabi', 'pa'),
('Romanian', 'ro'),
('Russian', 'ru'),
('Spanish', 'es'),
('Swedish', 'sv'),
('Turkish', 'tr'),
('Ukrainian', 'uk')
]
for i in languages: self.list.append({'name': str(i[0]), 'url': self.language_link % i[1], 'image': 'languages.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def certifications(self):
certificates = ['G', 'PG', 'PG-13', 'R', 'NC-17']
for i in certificates: self.list.append({'name': str(i), 'url': self.certification_link % str(i).replace('-', '_').lower(), 'image': 'certificates.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def years(self):
year = self.datetime.strftime('%Y')
for i in range(int(year), int(year)-50, -1): self.list.append({'name': str(i), 'url': self.year_link % (str(i), str(i)), 'image': 'years.png', 'action': 'movies'})
self.addDirectory(self.list)
return self.list
def persons(self, url):
if url == None:
self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
else:
self.list = cache.get(self.imdb_person_list, 1, url)
for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if self.imdb_user == '': raise Exception()
userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
result = json.loads(result)
items = []
for i in result:
try: items.append(i['movie'])
except: pass
if len(items) == 0:
items = result
except:
return
try:
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
if not int(q['limit']) == len(items): raise Exception()
q.update({'page': str(int(q['page']) + 1)})
q = (urllib.urlencode(q)).replace('%2C', ',')
next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
next = next.encode('utf-8')
except:
next = ''
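# (illustrative) pagination: when a full page comes back, the 'page'
# parameter is bumped, e.g. '...movies/trending?limit=40&page=1'
# -> '...movies/trending?limit=40&page=2'; otherwise 'next' stays ''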
for item in items:
try:
title = item['title']
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = item['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = item['ids']['imdb']
if imdb == None or imdb == '': raise Exception()
imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
try: premiered = item['released']
except: premiered = '0'
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try: genre = item['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['runtime'])
except: duration = '0'
if duration == None or duration == 'None': duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['rating'])
except: rating = '0'
if rating == None or rating == 'None' or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None or votes == 'None': votes = '0'
votes = votes.encode('utf-8')
try: mpaa = item['certification']
except: mpaa = '0'
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
try: plot = item['overview']
except: plot = '0'
if plot == None: plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'imdb': imdb, 'tvdb': '0', 'poster': '0', 'next': next})
except:
pass
return self.list
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
return
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def imdb_list(self, url):
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
def imdb_watchlist_id(url):
return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
if url == self.imdbwatchlist_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist_link % url
elif url == self.imdbwatchlist2_link:
url = cache.get(imdb_watchlist_id, 8640, url)
url = self.imdblist2_link % url
result = client.request(url)
result = result.replace('\n','')
items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
except:
return
try:
next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})
if len(next) == 0:
next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
next = [i[0] for i in next if 'Next' in i[1]]
next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
next = client.replaceHTMLCodes(next)
next = next.encode('utf-8')
except:
next = ''
for item in items:
try:
title = client.parseDOM(item, 'a')[1]
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
year = re.findall('(\d{4})', year[0])[0]
year = year.encode('utf-8')
if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
imdb = client.parseDOM(item, 'a', ret='href')[0]
imdb = re.findall('(tt\d*)', imdb)[0]
imdb = imdb.encode('utf-8')
try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
except: poster = '0'
if '/nopicture/' in poster: poster = '0'
poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
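# (illustrative) the re.sub above normalizes IMDb thumbnail size tokens,
# e.g. '..._SX300_CR0,0,300,444_.jpg' -> '..._SX500.jpg'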
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
except: genre = '0'
genre = ' / '.join([i.strip() for i in genre.split(',')])
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
except: duration = '0'
duration = duration.encode('utf-8')
rating = '0'
try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
except: pass
try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
except: rating = '0'
try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
except: pass
if rating == '' or rating == '-': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
except: votes = '0'
try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
except: mpaa = '0'
if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
mpaa = mpaa.replace('_', '-')
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
except: director = '0'
director = client.parseDOM(director, 'a')
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: cast = re.findall('Star(?:s|):(.+?)(?:\||</div>)', item)[0]
except: cast = '0'
cast = client.replaceHTMLCodes(cast)
cast = cast.encode('utf-8')
cast = client.parseDOM(cast, 'a')
if cast == []: cast = '0'
plot = '0'
try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
except: pass
try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
except: pass
plot = plot.rsplit('<span>', 1)[0].strip()
plot = re.sub('<.+?>|</.+?>', '', plot)
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'originaltitle': title, 'year': year, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': '0', 'poster': poster, 'next': next})
except:
pass
return self.list
def imdb_person_list(self, url):
try:
result = client.request(url)
items = client.parseDOM(result, 'tr', attrs = {'class': '.+? detailed'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a', ret='title')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = re.findall('(nm\d*)', url, re.I)[0]
url = self.person_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = client.parseDOM(item, 'img', ret='src')[0]
if not ('._SX' in image or '._SY' in image): raise Exception()
image = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', image)
image = client.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
def imdb_user_list(self, url):
try:
result = client.request(url)
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
except:
return
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
def worker(self, level=1):
self.meta = []
total = len(self.list)
self.fanart_tv_headers = {'api-key': 'YTc2MGMyMTEzYTM1OTk5NzFiN2FjMWU0OWUzMTAyMGQ='.decode('base64')}
if not self.fanart_tv_user == '':
self.fanart_tv_headers.update({'client-key': self.fanart_tv_user})
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.lang, self.user)
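# metadata is fetched in batches of 40 worker threads to bound concurrency;
# (illustrative) 100 items -> index batches 0-39, 40-79, 80-99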
for r in range(0, total, 40):
threads = []
for i in range(r, r+40):
if i < total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
if self.meta: metacache.insert(self.meta)
self.list = [i for i in self.list if not i['imdb'] == '0']
self.list = metacache.local(self.list, self.tm_img_link, 'poster3', 'fanart2')
if self.fanart_tv_user == '':
for i in self.list: i.update({'clearlogo': '0', 'clearart': '0'})
def super_info(self, i):
try:
if self.list[i]['metacache'] == True: raise Exception()
imdb = self.list[i]['imdb']
url = self.imdb_info_link % imdb
item = client.request(url, timeout='10')
item = json.loads(item)
title = item['Title']
title = title.encode('utf-8')
originaltitle = title
year = item['Year']
year = year.encode('utf-8')
imdb = item['imdbID']
if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
imdb = imdb.encode('utf-8')
premiered = item['Released']
if premiered == None or premiered == '' or premiered == 'N/A': premiered = '0'
premiered = re.findall('(\d*) (.+?) (\d*)', premiered)
try: premiered = '%s-%s-%s' % (premiered[0][2], {'Jan':'01', 'Feb':'02', 'Mar':'03', 'Apr':'04', 'May':'05', 'Jun':'06', 'Jul':'07', 'Aug':'08', 'Sep':'09', 'Oct':'10', 'Nov':'11', 'Dec':'12'}[premiered[0][1]], premiered[0][0])
except: premiered = '0'
premiered = premiered.encode('utf-8')
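# (illustrative) OMDb returns dates like '14 Oct 1994', which the month
# lookup above rewrites to '1994-10-14'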
genre = item['Genre']
if genre == None or genre == '' or genre == 'N/A': genre = '0'
genre = genre.replace(', ', ' / ')
genre = genre.encode('utf-8')
duration = item['Runtime']
if duration == None or duration == '' or duration == 'N/A': duration = '0'
duration = re.sub('[^0-9]', '', str(duration))
duration = duration.encode('utf-8')
rating = item['imdbRating']
if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
votes = item['imdbVotes']
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None or votes == '' or votes == 'N/A': votes = '0'
votes = votes.encode('utf-8')
mpaa = item['Rated']
if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
mpaa = mpaa.encode('utf-8')
director = item['Director']
if director == None or director == '' or director == 'N/A': director = '0'
director = director.replace(', ', ' / ')
director = re.sub(r'\(.*?\)', '', director)
director = ' '.join(director.split())
director = director.encode('utf-8')
writer = item['Writer']
if writer == None or writer == '' or writer == 'N/A': writer = '0'
writer = writer.replace(', ', ' / ')
writer = re.sub(r'\(.*?\)', '', writer)
writer = ' '.join(writer.split())
writer = writer.encode('utf-8')
cast = item['Actors']
if cast == None or cast == '' or cast == 'N/A': cast = '0'
cast = [x.strip() for x in cast.split(',') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
if cast == []: cast = '0'
plot = item['Plot']
if plot == None or plot == '' or plot == 'N/A': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
poster = item['Poster']
if poster == None or poster == '' or poster == 'N/A': poster = '0'
if '/nopicture/' in poster: poster = '0'
poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
if 'poster' in self.list[i] and poster == '0': poster = self.list[i]['poster']
poster = poster.encode('utf-8')
artmeta = True
art = client.request(self.fanart_tv_art_link % imdb, headers=self.fanart_tv_headers, timeout='10', error=True)
try: art = json.loads(art)
except: artmeta = False
try:
poster2 = art['movieposter']
poster2 = [x for x in poster2 if x.get('lang') == 'en'][::-1] + [x for x in poster2 if x.get('lang') == '00'][::-1]
poster2 = poster2[0]['url'].encode('utf-8')
except:
poster2 = '0'
try:
if 'moviebackground' in art: fanart = art['moviebackground']
else: fanart = art['moviethumb']
fanart = [x for x in fanart if x.get('lang') == 'en'][::-1] + [x for x in fanart if x.get('lang') == '00'][::-1]
fanart = fanart[0]['url'].encode('utf-8')
except:
fanart = '0'
try:
banner = art['moviebanner']
banner = [x for x in banner if x.get('lang') == 'en'][::-1] + [x for x in banner if x.get('lang') == '00'][::-1]
banner = banner[0]['url'].encode('utf-8')
except:
banner = '0'
try:
if 'hdmovielogo' in art: clearlogo = art['hdmovielogo']
else: clearlogo = art['clearlogo']
clearlogo = [x for x in clearlogo if x.get('lang') == 'en'][::-1] + [x for x in clearlogo if x.get('lang') == '00'][::-1]
clearlogo = clearlogo[0]['url'].encode('utf-8')
except:
clearlogo = '0'
try:
if 'hdmovieclearart' in art: clearart = art['hdmovieclearart']
else: clearart = art['clearart']
clearart = [x for x in clearart if x.get('lang') == 'en'][::-1] + [x for x in clearart if x.get('lang') == '00'][::-1]
clearart = clearart[0]['url'].encode('utf-8')
except:
clearart = '0'
try:
if self.tm_user == '': raise Exception()
art2 = client.request(self.tm_art_link % imdb, timeout='10', error=True)
art2 = json.loads(art2)
except:
pass
try:
poster3 = art2['posters']
poster3 = [x for x in poster3 if x.get('iso_639_1') == 'en'] + [x for x in poster3 if not x.get('iso_639_1') == 'en']
poster3 = [(x['width'], x['file_path']) for x in poster3]
poster3 = [(x[0], x[1]) if x[0] < 300 else ('300', x[1]) for x in poster3]
poster3 = self.tm_img_link % poster3[0]
poster3 = poster3.encode('utf-8')
except:
poster3 = '0'
try:
fanart2 = art2['backdrops']
fanart2 = [x for x in fanart2 if x.get('iso_639_1') == 'en'] + [x for x in fanart2 if not x.get('iso_639_1') == 'en']
fanart2 = [x for x in fanart2 if x.get('width') == 1920] + [x for x in fanart2 if x.get('width') < 1920]
fanart2 = [(x['width'], x['file_path']) for x in fanart2]
fanart2 = [(x[0], x[1]) if x[0] < 1280 else ('1280', x[1]) for x in fanart2]
fanart2 = self.tm_img_link % fanart2[0]
fanart2 = fanart2.encode('utf-8')
except:
fanart2 = '0'
try:
if self.lang == 'en': raise Exception()
url = self.trakt_lang_link % (imdb, self.lang)
item = trakt.getTrakt(url)
item = json.loads(item)[0]
t = item['title']
if not (t == None or t == ''):
try: title = t.encode('utf-8')
except: pass
t = item['overview']
if not (t == None or t == ''):
try: plot = t.encode('utf-8')
except: pass
except:
pass
item = {'title': title, 'originaltitle': originaltitle, 'year': year, 'imdb': imdb, 'poster': poster, 'poster2': poster2, 'poster3': poster3, 'banner': banner, 'fanart': fanart, 'fanart2': fanart2, 'clearlogo': clearlogo, 'clearart': clearart, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot}
item = dict((k,v) for k, v in item.iteritems() if not v == '0')
self.list[i].update(item)
if artmeta == False: raise Exception()
meta = {'imdb': imdb, 'tvdb': '0', 'lang': self.lang, 'user': self.user, 'item': item}
self.meta.append(meta)
except:
pass
def movieDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getMovieIndicators(refresh=True) if action == 'movies' else playcount.getMovieIndicators()
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
nextMenu = control.lang(32053).encode('utf-8')
for i in items:
try:
label = '%s (%s)' % (i['title'], i['year'])
imdb, title, year = i['imdb'], i['originaltitle'], i['year']
sysname = urllib.quote_plus('%s (%s)' % (title, year))
systitle = urllib.quote_plus(title)
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'movie'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
#meta.update({'trailer': 'plugin://script.extendedinfo/?info=playtrailer&&id=%s' % imdb})
if not 'duration' in i: meta.update({'duration': '120'})
elif i['duration'] == '0': meta.update({'duration': '120'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
try:
overlay = int(playcount.getMovieOverlay(indicators, imdb))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'poster3' in i and not i['poster3'] == '0':
art.update({'icon': i['poster3'], 'thumb': i['poster3'], 'poster': i['poster3']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
elif 'poster2' in i and not i['poster2'] == '0':
art.update({'icon': i['poster2'], 'thumb': i['poster2'], 'poster': i['poster2']})
else:
art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
else:
art.update({'banner': addonBanner})
if 'clearlogo' in i and not i['clearlogo'] == '0':
art.update({'clearlogo': i['clearlogo']})
if 'clearart' in i and not i['clearart'] == '0':
art.update({'clearart': i['clearart']})
if settingFanart == 'true' and 'fanart2' in i and not i['fanart2'] == '0':
item.setProperty('Fanart_Image', i['fanart2'])
elif settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
except:
pass
try:
url = items[0]['next']
if url == '': raise Exception()
icon = control.addonNext()
url = '%s?action=moviePage&url=%s' % (sysaddon, urllib.quote_plus(url))
item = control.item(label=nextMenu)
item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'banner': icon})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'movies')
control.directory(syshandle, cacheToDisc=True)
views.setView('movies', {'skin.estuary': 55, 'skin.confluence': 500})
def addDirectory(self, items, queue=False):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'addons')
control.directory(syshandle, cacheToDisc=True)
|
viranch/exodus
|
resources/lib/indexers/movies.py
|
Python
|
gpl-3.0
| 44,317
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import base64
import functools
import time
import zlib
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.objects import aggregate as aggregate_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.image.glance.GlanceStore',
help='Dom0 plugin driver used to handle image uploads.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_CONFIGDRIVE = '3'
# Note(johngarbutt) HVM guests only support four devices
# until the PV tools activate, when others become available.
# As such, the ephemeral disk is only available once the PV tools load.
# Note(johngarbutt) When very large ephemeral storage is required,
# multiple disks may be added. In this case the device id below
# is used for the first disk. The second disk will be given the
# next device id, i.e. 5, and so on, until enough space is added.
DEVICE_EPHEMERAL = '4'
# Note(johngarbutt) Currently don't support ISO boot during rescue
# and we must have the ISO visible before the PV drivers start
DEVICE_CD = '1'
def make_step_decorator(context, instance, update_instance_progress):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
update_instance_progress(context, instance,
step_info['current'], step_info['total'])
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
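# Illustrative usage of make_step_decorator (a sketch, not part of nova;
# 'progress_cb' is a hypothetical stand-in for update_instance_progress):
#
#     def progress_cb(context, instance, current, total):
#         LOG.debug("progress %s/%s", current, total)
#
#     step = make_step_decorator(context, instance, progress_cb)
#
#     @step
#     def prepare(): pass
#
#     @step
#     def boot(): pass
#
#     prepare()   # progress_cb(..., 1, 2)  -> 50%
#     boot()      # progress_cb(..., 2, 2)  -> 100%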
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
LOG.debug(_("Importing image upload handler: %s"),
CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
def agent_enabled(self, instance):
if CONF.xenapi_disable_agent:
return False
return xapi_agent.should_use_agent(instance)
def _get_agent(self, instance, vm_ref):
if self.agent_enabled(instance):
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def instance_exists(self, name_label):
return vm_utils.lookup(self._session, name_label) is not None
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls.
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
nova_uuids = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
other_config = vm_rec['other_config']
nova_uuid = other_config.get('nova_uuid')
if nova_uuid:
nova_uuids.append(nova_uuid)
return nova_uuids
def confirm_migration(self, migration, instance, network_info):
self._destroy_orig_vm(instance, network_info)
def _destroy_orig_vm(self, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None,
power_on=True):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info,
power_on)
def _restore_orig_vm_and_cleanup_orphan(self, instance,
block_device_info=None,
power_on=True):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
if power_on and vm_utils.is_vm_shutdown(self._session, vm_ref):
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
def null_step_decorator(f):
return f
def create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label):
#TODO(johngarbutt) clean up if this is not run
vdis = vm_utils.import_all_migrated_disks(self._session,
instance)
def undo_create_disks():
eph_vdis = vdis['ephemerals']
root_vdi = vdis['root']
vdi_refs = [vdi['ref'] for vdi in eph_vdis.values()]
vdi_refs.append(root_vdi['ref'])
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
def completed_callback():
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
self._spawn(context, instance, image_meta, null_step_decorator,
create_disks_step, first_boot=False, injected_files=None,
admin_password=None, network_info=network_info,
block_device_info=block_device_info, name_label=None,
rescue=False, power_on=power_on, resize=resize_instance,
completed_callback=completed_callback)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
step = make_step_decorator(context, instance,
self._update_instance_progress)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label, image_meta.get('id'),
disk_image_type, block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
self._spawn(context, instance, image_meta, step, create_disks_step,
True, injected_files, admin_password,
network_info, block_device_info, name_label, rescue)
def _spawn(self, context, instance, image_meta, step, create_disks_step,
first_boot, injected_files=None, admin_password=None,
network_info=None, block_device_info=None,
name_label=None, rescue=False, power_on=True, resize=True,
completed_callback=None):
if name_label is None:
name_label = instance['name']
self._ensure_instance_name_unique(name_label)
self._ensure_enough_free_mem(instance)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file, ramdisk_file = vm_utils.create_kernel_and_ramdisk(
context, self._session, instance, name_label)
def undo_create_kernel_ramdisk():
vm_utils.destroy_kernel_ramdisk(self._session, instance,
kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
try:
ipxe_boot = strutils.bool_from_string(
image_meta['properties']['ipxe_boot'])
except KeyError:
ipxe_boot = False
if ipxe_boot:
if 'iso' in vdis:
vm_utils.handle_ipxe_iso(
self._session, instance, vdis['iso'], network_info)
else:
LOG.warning(_('ipxe_boot is True but no ISO image found'),
instance=instance)
root_vdi = vdis.get('root')
if root_vdi and resize:
self._resize_up_root_vdi(instance, root_vdi)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, network_info, admin_password,
injected_files)
if not first_boot:
self._attach_mapped_block_devices(instance,
block_device_info)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
vbd_ref = self._attach_orig_disk_for_rescue(instance, vm_ref)
def undo_attach_root_disk():
# destroy the vbd in preparation to re-attach the VDI
# to its original VM. (does not delete VDI)
vm_utils.destroy_vbd(self._session, vbd_ref)
undo_mgr.undo_with(undo_attach_root_disk)
@step
def inject_instance_data_step(undo_mgr, vm_ref, vdis):
self._inject_instance_metadata(instance, vm_ref)
self._inject_auto_disk_config(instance, vm_ref)
# NOTE: We add the hostname here so windows PV tools
# can pick it up during booting
if first_boot:
self._inject_hostname(instance, vm_ref, rescue)
self._file_inject_vm_settings(instance, vm_ref, vdis, network_info)
self.inject_network_info(instance, network_info, vm_ref)
@step
def setup_network_step(undo_mgr, vm_ref):
self._create_vifs(instance, vm_ref, network_info)
self._prepare_instance_filter(instance, network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
if power_on:
self._start(instance, vm_ref)
self._wait_for_instance_to_start(instance, vm_ref)
@step
def configure_booted_instance_step(undo_mgr, vm_ref):
if first_boot:
self._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self._remove_hostname(instance, vm_ref)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
inject_instance_data_step(undo_mgr, vm_ref, vdis)
setup_network_step(undo_mgr, vm_ref)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
configure_booted_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
if completed_callback:
completed_callback()
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _attach_orig_disk_for_rescue(self, instance, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
return vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
def _file_inject_vm_settings(self, instance, vm_ref, vdis, network_info):
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
def _ensure_instance_name_unique(self, name_label):
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
def _ensure_enough_free_mem(self, instance):
if not vm_utils.is_enough_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
def _create_vm_record(self, context, instance, name_label,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
mode = vm_utils.determine_vm_mode(instance, disk_image_type)
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
use_pv_kernel = (mode == vm_mode.XEN)
LOG.debug(_("Using PV kernel: %s") % use_pv_kernel, instance=instance)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, network_info,
admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = flavors.extract_flavor(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
root_disk_size = instance_type['root_gb']
if root_disk_size > 0:
vm_utils.generate_iso_blank_root_disk(self._session, instance,
vm_ref, DEVICE_ROOT, name_label, root_disk_size)
cd_vdi = vdis.pop('iso')
vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize root disk..."), instance=instance)
vm_utils.try_auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
ephemeral_vdis = vdis.get('ephemerals')
if ephemeral_vdis:
# attach existing (migrated) ephemeral disks
for userdevice, ephemeral_vdi in ephemeral_vdis.iteritems():
vm_utils.create_vbd(self._session, vm_ref,
ephemeral_vdi['ref'],
userdevice, bootable=False)
else:
# create specified ephemeral disks
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
network_info,
admin_password=admin_password,
files=files)
def _wait_for_instance_to_start(self, instance, vm_ref):
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
def _configure_new_instance_with_agent(self, instance, vm_ref,
injected_files, admin_password):
if not self.agent_enabled(instance):
LOG.debug(_("Skip agent setup, not enabled."), instance=instance)
return
agent = self._get_agent(instance, vm_ref)
version = agent.get_version()
if not version:
LOG.debug(_("Skip agent setup, unable to contact agent."),
instance=instance)
return
LOG.debug(_('Detected agent version: %s'), version, instance=instance)
# NOTE(johngarbutt) the agent object allows all of
# the following steps to silently fail
agent.update_if_needed(version)
agent.inject_ssh_key()
if injected_files:
agent.inject_files(injected_files)
if admin_password:
agent.set_admin_password(admin_password)
agent.resetnetwork()
def _prepare_instance_filter(self, instance, network_info):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
def _get_vm_opaque_ref(self, instance, check_rescue=False):
"""Get xapi OpaqueRef from a db record.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call
'xenapi_image_upload_handler' to upload the images.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
post_snapshot_callback=update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the get_vdis_for_instance step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
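# (illustrative) step 2 of RESIZE_TOTAL_STEPS=5 -> progress 40.0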
LOG.debug(_("Updating progress to %d"), progress,
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
step = make_step_decorator(context, instance,
self._update_instance_progress)
@step
def fake_step_to_match_resizing_up():
pass
@step
def rename_and_power_off_vm(undo_mgr):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._apply_orig_vm_name_label(instance, vm_ref)
def restore_orig_vm():
# Do not need to restore block devices, not yet been removed
self._restore_orig_vm_and_cleanup_orphan(instance)
undo_mgr.undo_with(restore_orig_vm)
@step
def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref):
new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session,
instance, old_vdi_ref, instance_type)
def cleanup_vdi_copy():
vm_utils.destroy_vdi(self._session, new_vdi_ref)
undo_mgr.undo_with(cleanup_vdi_copy)
return new_vdi_ref, new_vdi_uuid
@step
def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid):
vm_utils.migrate_vhd(self._session, instance, new_vdi_uuid,
dest, sr_path, 0)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_vdi_ref)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
undo_mgr = utils.UndoManager()
try:
fake_step_to_match_resizing_up()
rename_and_power_off_vm(undo_mgr)
old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize(
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception as error:
LOG.exception(_("_migrate_disk_resizing_down failed. "
"Restoring orig vm due_to: %s."), error,
instance=instance)
undo_mgr._rollback()
raise exception.InstanceFaultRollback(error)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
"""
NOTE(johngarbutt) Understanding how resize up works.
For resize up, we attempt to minimize the amount of downtime
for users by copying snapshots of their disks, while their
VM is still running.
It is worth noting that migrating the snapshot means migrating
the whole VHD chain up to, but not including, the leaf VHD the VM
is still writing to.
Once the snapshots have been migrated, we power down the VM
and migrate all the disk changes since the snapshots were taken.
In addition, the snapshots are taken at the latest possible point,
to help minimize the time it takes to migrate the disk changes
after the VM has been turned off.
Before starting to migrate any of the disks, we rename the VM
to <current_vm_name>-orig, in case we attempt to migrate the VM
back onto this host, so that once we have completed the migration
of the disk, confirm/rollback migrate can work in the usual way.
If there is a failure at any point, we need to roll back to the
position we were in before starting to migrate. In particular,
we need to delete any snapshot VDIs that may have been created,
and restore the VM back to its original name.
"""
step = make_step_decorator(context, instance,
self._update_instance_progress)
@step
def fake_step_to_show_snapshot_complete():
pass
@step
def transfer_immutable_vhds(root_vdi_uuids):
active_root_vdi_uuid = root_vdi_uuids[0]
immutable_root_vdi_uuids = root_vdi_uuids[1:]
for vhd_num, vdi_uuid in enumerate(immutable_root_vdi_uuids,
start=1):
vm_utils.migrate_vhd(self._session, instance, vdi_uuid, dest,
sr_path, vhd_num)
LOG.debug(_("Migrated root base vhds"), instance=instance)
return active_root_vdi_uuid
def _process_ephemeral_chain_recursive(ephemeral_chains,
active_vdi_uuids):
# This method is called several times, recursively.
# The first phase snapshots the ephemeral disks, and
# migrates the read only VHD files.
# The final call into this method calls
# power_down_and_transfer_leaf_vhds
# to turn off the VM and copy the rest of the VHDs.
number_of_chains = len(ephemeral_chains)
if number_of_chains == 0:
# If we get here, we have snapshotted and migrated
                # all the ephemeral disks, so it's time to power down
# and complete the migration of the diffs since the snapshot
LOG.debug(_("Migrated all base vhds."), instance=instance)
return power_down_and_transfer_leaf_vhds(
active_root_vdi_uuid,
active_vdi_uuids)
current_chain = ephemeral_chains[0]
remaining_chains = []
if number_of_chains > 1:
remaining_chains = ephemeral_chains[1:]
ephemeral_disk_index = len(active_vdi_uuids)
userdevice = int(DEVICE_EPHEMERAL) + ephemeral_disk_index
# Here we take a snapshot of the ephemeral disk,
# and migrate all VHDs in the chain that are not being written to
# Once that is completed, we call back into this method to either:
# - migrate any remaining ephemeral disks
            # - or, if all disks are migrated, power down and complete
            #   the migration by copying the diffs since all the
            #   snapshots were taken
with vm_utils.snapshot_attached_here(self._session, instance,
vm_ref, label, str(userdevice)) as chain_vdi_uuids:
# remember active vdi, we will migrate these later
active_vdi_uuids.append(chain_vdi_uuids[0])
# migrate inactive vhds
inactive_vdi_uuids = chain_vdi_uuids[1:]
ephemeral_disk_number = ephemeral_disk_index + 1
for seq_num, vdi_uuid in enumerate(inactive_vdi_uuids,
start=1):
vm_utils.migrate_vhd(self._session, instance, vdi_uuid,
dest, sr_path, seq_num,
ephemeral_disk_number)
LOG.debug(_("Read-only migrated for disk: %s") % userdevice,
instance=instance)
# This is recursive to simplify the taking and cleaning up
# of all the ephemeral disk snapshots
return _process_ephemeral_chain_recursive(remaining_chains,
active_vdi_uuids)
@step
def transfer_ephemeral_disks_then_all_leaf_vdis():
ephemeral_chains = vm_utils.get_all_vdi_uuids_for_vm(
self._session, vm_ref,
min_userdevice=int(DEVICE_EPHEMERAL))
if ephemeral_chains:
ephemeral_chains = list(ephemeral_chains)
else:
ephemeral_chains = []
_process_ephemeral_chain_recursive(ephemeral_chains, [])
@step
def power_down_and_transfer_leaf_vhds(root_vdi_uuid,
ephemeral_vdi_uuids=None):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
vm_utils.migrate_vhd(self._session, instance, root_vdi_uuid,
dest, sr_path, 0)
if ephemeral_vdi_uuids:
for ephemeral_disk_number, ephemeral_vdi_uuid in enumerate(
ephemeral_vdi_uuids, start=1):
vm_utils.migrate_vhd(self._session, instance,
ephemeral_vdi_uuid, dest,
sr_path, 0, ephemeral_disk_number)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
self._apply_orig_vm_name_label(instance, vm_ref)
try:
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as root_vdi_uuids:
# NOTE(johngarbutt) snapshot attached here will delete
# the snapshot if an error occurs
fake_step_to_show_snapshot_complete()
# transfer all the non-active VHDs in the root disk chain
active_root_vdi_uuid = transfer_immutable_vhds(root_vdi_uuids)
# snapshot and transfer all ephemeral disks
# then power down and transfer any diffs since
# the snapshots were taken
transfer_ephemeral_disks_then_all_leaf_vdis()
except Exception as error:
LOG.exception(_("_migrate_disk_resizing_up failed. "
"Restoring orig vm due_to: %s."), error,
instance=instance)
try:
self._restore_orig_vm_and_cleanup_orphan(instance)
#TODO(johngarbutt) should also cleanup VHDs at destination
except Exception as rollback_error:
LOG.warn(_("_migrate_disk_resizing_up failed to "
"rollback: %s"), rollback_error,
instance=instance)
raise exception.InstanceFaultRollback(error)
def _apply_orig_vm_name_label(self, instance, vm_ref):
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
def _ensure_not_resize_ephemeral(self, instance, instance_type):
old_gb = instance["ephemeral_gb"]
new_gb = instance_type["ephemeral_gb"]
if old_gb != new_gb:
reason = _("Unable to resize ephemeral disks")
            raise exception.ResizeError(reason=reason)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, block_device_info):
"""Copies a VHD from one host machine to another, possibly
        resizing filesystem beforehand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
self._ensure_not_resize_ephemeral(instance, instance_type)
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
resize_down = old_gb > new_gb
if new_gb == 0 and old_gb != 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
self._detach_block_devices_from_orig_vm(instance, block_device_info)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _detach_block_devices_from_orig_vm(self, instance, block_device_info):
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
name_label = self._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info, name_label,
mount_device)
def _resize_up_root_vdi(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
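        # e.g. root_gb=20 -> 20 * 1024**3 = 21474836480 bytes (illustrative)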
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"),
{'vdi_uuid': vdi_uuid, 'old_gb': old_gb,
'new_gb': new_gb}, instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version.
"""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
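        # Illustrative mapping implied by the checks below:
        #   XCP 1.1        -> 'VDI.resize_online'
        #   XCP 1.2+       -> 'VDI.resize'
        #   XenServer 5.x  -> 'VDI.resize_online'
        #   XenServer 6.0+ -> 'VDI.resize'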
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure as exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
        return ''.join([x if x in allowed_chars else '_' for x in key])
def _inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
@utils.synchronized('xenstore-' + instance['uuid'])
def store_meta(topdir, data_dict):
for key, value in data_dict.items():
key = self._sanitize_xenstore_key(key)
value = value or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', utils.instance_meta(instance))
def _inject_auto_disk_config(self, instance, vm_ref):
"""Inject instance's auto_disk_config attribute into xenstore."""
@utils.synchronized('xenstore-' + instance['uuid'])
def store_auto_disk_config(key, value):
value = value and True or False
self._add_to_param_xenstore(vm_ref, key, str(value))
store_auto_disk_config('vm-data/auto-disk-config',
instance['auto_disk_config'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
# NOTE(johngarbutt) race conditions mean we can still get here
# during operations where the VM is not present, like resize.
# Skip the update when not possible, as the updated metadata will
# get added when the VM is being booted up at the end of the
# resize or rebuild.
LOG.warn(_("Unable to update metadata, VM not found."),
instance=instance, exc_info=True)
return
def process_change(location, change):
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
@utils.synchronized('xenstore-' + instance['uuid'])
def update_meta():
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
process_change(location, change)
update_meta()
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, instance,
kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
4. Destroying that actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]:
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM.
"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return last few lines of instance console."""
dom_id = self._get_dom_id(instance, check_rescue=True)
try:
raw_console_data = self._session.call_plugin('console',
'get_console_log', {'dom_id': dom_id})
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
return zlib.decompress(base64.b64decode(raw_console_data))
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
if instance['vm_state'] == vm_states.RESCUED:
name = '%s-rescue' % instance['name']
vm_ref = vm_utils.lookup(self._session, name)
if vm_ref is None:
# The rescue instance might not be ready at this point.
raise exception.InstanceNotReady(instance_id=instance['uuid'])
else:
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ use http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
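        # Illustrative shape of the result (hypothetical values):
        #   {'label': 'public', 'mac': 'fa:16:3e:00:00:01',
        #    'gateway': '10.0.0.1', 'broadcast': '10.0.0.255',
        #    'ips': [{'ip': '10.0.0.5', 'enabled': '1',
        #             'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}],
        #    'dns': ['8.8.8.8']}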
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_nwinfo():
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
update_nwinfo()
def _create_vifs(self, instance, vm_ref, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %s'),
network_ref, instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
{'vif_ref': vif_ref, 'network_ref': network_ref},
instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance, rescue=False):
"""Calls resetnetwork method in agent."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
self._inject_hostname(instance, vm_ref, rescue)
agent.resetnetwork()
self._remove_hostname(instance, vm_ref)
else:
raise NotImplementedError()
def _inject_hostname(self, instance, vm_ref, rescue):
"""Inject the hostname of the instance into the xenstore."""
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname (%s) into xenstore") % hostname,
instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_hostname():
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
update_hostname()
def _remove_hostname(self, instance, vm_ref):
LOG.debug(_("Removing hostname from xenstore"), instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_hostname():
self._remove_from_param_xenstore(vm_ref, 'vm-data/hostname')
update_hostname()
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance=None, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
args = {}
if instance or vm_ref:
args['dom_id'] = self._get_dom_id(instance, vm_ref)
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'),
{'method': method, 'args': args, 'e': e},
instance=instance)
return {'returncode': 'error', 'message': err_msg}
def _get_dom_id(self, instance=None, vm_ref=None, check_rescue=False):
vm_ref = vm_ref or self._get_vm_opaque_ref(instance, check_rescue)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_rec['domid']
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = aggregate_obj.AggregateList.get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadata[hostname]
except KeyError:
reason = _('Destination host:%s must be in the same '
'aggregate as the source server') % hostname
raise exception.MigrationPreCheckError(reason=reason)
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
msg = _('No suitable network for migrate')
raise exception.MigrationPreCheckError(reason=msg)
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg)
return migrate_data
def _get_iscsi_srs(self, ctxt, instance_ref):
vm_ref = self._get_vm_opaque_ref(instance_ref)
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
iscsi_srs = []
for vbd_ref in vbd_refs:
vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref)
# Check if it's on an iSCSI SR
sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref)
if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi':
iscsi_srs.append(sr_ref)
return iscsi_srs
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
dest_check_data = {}
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data.update(
{"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}})
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return dest_check_data
def _is_xsm_sr_check_relaxed(self):
try:
return self.cached_xsm_sr_relaxed
except AttributeError:
config_value = None
try:
config_value = self._make_plugin_call('config_file',
'get_val',
key='relax-xsm-sr-check')
except Exception as exc:
LOG.exception(exc)
self.cached_xsm_sr_relaxed = config_value == "true"
return self.cached_xsm_sr_relaxed
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
# XAPI must support the relaxed SR check for live migrating with
# iSCSI VBDs
if not self._is_xsm_sr_check_relaxed():
raise exception.MigrationError(_('XAPI supporting '
'relax-xsm-sr-check=true required'))
if 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('VM.assert_can_migrate failed')
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None):
"""generate a vdi_map for _call_live_migrate_command."""
if sr_ref is None:
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
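        # e.g. {'OpaqueRef:<vdi>': destination_sr_ref, ...} (illustrative)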
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
# Add destination SR refs for all of the VDIs that we created
# as part of the pre migration callback
if 'pre_live_migration_result' in migrate_data:
pre_migrate_data = migrate_data['pre_live_migration_result']
sr_uuid_map = pre_migrate_data.get('sr_uuid_map', [])
for sr_uuid in sr_uuid_map:
# Source and destination SRs have the same UUID, so get the
# reference for the local SR
sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
vdi_map.update(
self._generate_vdi_map(
sr_uuid_map[sr_uuid], vm_ref, sr_ref))
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
iscsi_srs = self._get_iscsi_srs(context, instance)
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
# Tidy up the iSCSI SRs
for sr_ref in iscsi_srs:
volume_utils.forget_sr(self._session, sr_ref)
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {"live": "true"})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def post_live_migration_at_destination(self, context, instance,
network_info, block_migration,
block_device_info):
# FIXME(johngarbutt): we should block all traffic until we have
# applied security groups, however this requires changes to XenServer
self._prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
def attach_block_device_volumes(self, block_device_info):
sr_uuid_map = {}
try:
if block_device_info is not None:
for block_device_map in block_device_info[
'block_device_mapping']:
sr_uuid, _ = self._volumeops.attach_volume(
block_device_map['connection_info'],
None,
block_device_map['mount_device'],
hotplug=False)
sr_ref = self._session.call_xenapi('SR.get_by_uuid',
sr_uuid)
sr_uuid_map[sr_uuid] = sr_ref
except Exception:
with excutils.save_and_reraise_exception():
# Disconnect the volumes we just connected
                for sr in sr_uuid_map:
                    volume_utils.forget_sr(self._session, sr_uuid_map[sr])
return sr_uuid_map
|
ntt-sic/nova
|
nova/virt/xenapi/vmops.py
|
Python
|
apache-2.0
| 88,905
|
"""
=============
Generic Views
=============
Class based helper views.
"""
class GenericManyToMany(object):
"""Generic view to edit many to many relations with extra fields."""
left_table = None
right_table = None
allow_multiple = True
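    # Usage sketch (hypothetical models; assumes the intended subclass
    # pattern for this truncated file):
    #   class CourseStudents(GenericManyToMany):
    #       left_table = Course
    #       right_table = Student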
|
tjnapster555/django-edu
|
djangoedu/core/generic_views.py
|
Python
|
mit
| 264
|
CODES = {
200: 'thunderstorm with light rain',
201: 'thunderstorm with rain',
202: 'thunderstorm with heavy rain',
210: 'light thunderstorm',
211: 'thunderstorm',
212: 'heavy thunderstorm',
221: 'ragged thunderstorm',
230: 'thunderstorm with light drizzle',
231: 'thunderstorm with drizzle',
232: 'thunderstorm with heavy drizzle',
300: 'light intensity drizzle',
301: 'drizzle',
302: 'heavy intensity drizzle',
310: 'light intensity drizzle rain',
311: 'drizzle rain',
312: 'heavy intensity drizzle rain',
313: 'shower rain and drizzle',
314: 'heavy shower rain and drizzle',
321: 'shower drizzle',
500: 'light rain',
501: 'moderate rain',
502: 'heavy intensity rain',
503: 'very heavy rain',
504: 'extreme rain',
511: 'freezing rain',
520: 'light intensity shower rain',
521: 'shower rain',
522: 'heavy intensity shower rain',
531: 'ragged shower rain',
600: 'light snow',
601: 'snow',
602: 'heavy snow',
611: 'sleet',
612: 'shower sleet',
    615: 'light rain and snow',
    616: 'rain and snow',
620: 'light shower snow',
621: 'shower snow',
622: 'heavy shower snow',
701: 'mist',
711: 'smoke',
721: 'haze',
731: 'Sand/Dust Whirls',
741: 'Fog',
751: 'sand',
761: 'dust',
762: 'VOLCANIC ASH',
771: 'SQUALLS',
781: 'TORNADO',
800: 'sky is clear',
801: 'few clouds',
802: 'scattered clouds',
803: 'broken clouds',
804: 'overcast clouds',
}
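

# Minimal usage sketch (illustrative):
if __name__ == '__main__':
    print(CODES.get(500))             # -> 'light rain'
    print(CODES.get(999, 'unknown'))  # safe fallback for an unmapped code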
|
bkosciow/doton
|
service/openweather_codes.py
|
Python
|
mit
| 1,557
|
"""
* Copyright 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
from pyjamas import DOM
from pyjamas.ui.Widget import Widget
from pyjamas.Canvas.Color import Color
def cvt(s):
return s
"""*
* Deferred binding implementation of GWTCanvas.
"""
class GWTCanvasImplDefault:
def __init__(self):
self.canvasContext = None
def arc(self, x, y, radius, startAngle, endAngle, antiClockwise):
self.canvasContext.arc(x,y,radius,startAngle,endAngle,antiClockwise)
def beginPath(self):
self.canvasContext.beginPath()
def clear(self, width, height):
self.clearRect(0,0,width,height)
def closePath(self):
self.canvasContext.closePath()
def createElement(self):
e = DOM.createElement("CANVAS")
self.setCanvasContext(e.getContext('2d'))
return e
def cubicCurveTo(self, cp1x, cp1y, cp2x, cp2y, x, y):
self.canvasContext.bezierCurveTo(cp1x,cp1y,cp2x,cp2y,x,y)
def setFont(self, font):
self.canvasContext.font = font
def fillText(self, text, sourceX, sourceY, maxWidth=None):
# TODO: split this dog's dinner into browser-specific
# and pyjd-specific overrides...
try:
if maxWidth is None:
self.canvasContext.fillText(text, sourceX, sourceY)
else:
self.canvasContext.fillText(text, sourceX, sourceY, maxWidth)
except:
self.saveContext()
self.translate(sourceX, sourceY)
try:
text = unicode(text) # for pyjd / xulrunner
self.canvasContext.mozDrawText(text) # old xulrunner
except:
self.canvasContext.drawText(text)
self.restoreContext()
def drawImage(self, img, sourceX, sourceY, sourceWidth=None, sourceHeight=None, destX=None, destY=None, destWidth=None, destHeight=None):
if isinstance(img, Widget):
img = img.getElement()
if sourceWidth is None:
self.canvasContext.drawImage(img,sourceX,sourceY)
else:
self.canvasContext.drawImage(img,sourceX,sourceY,sourceWidth,sourceHeight,destX,destY,destWidth,destHeight)
def fill(self):
self.canvasContext.fill()
def fillRect(self, startX, startY, width, height):
self.canvasContext.fillRect(startX,startY,width,height)
def getGlobalAlpha(self):
return self.canvasContext.globalAlpha
def getGlobalCompositeOperation(self):
return self.canvasContext.globalCompositeOperation
def getHeight(self, elem):
return DOM.getElementPropertyInt(elem, "height")
def getLineCap(self):
return self.canvasContext.lineCap
def getLineJoin(self):
return self.canvasContext.lineJoin
def getLineWidth(self):
return self.canvasContext.lineWidth
def getMiterLimit(self):
return self.canvasContext.miterLimit
def getWidth(self, elem):
return DOM.getElementPropertyInt(elem, "width")
def lineTo(self, x, y):
self.canvasContext.lineTo(x,y)
def moveTo(self, x, y):
self.canvasContext.moveTo(x,y)
def quadraticCurveTo(self, cpx, cpy, x, y):
self.canvasContext.quadraticCurveTo(cpx,cpy,x,y)
def rect(self, x, y, width, height):
self.canvasContext.rect(x,y,width,height)
def restoreContext(self):
self.canvasContext.restore()
def rotate(self, angle):
self.canvasContext.rotate(angle)
def saveContext(self):
self.canvasContext.save()
def scale(self, x, y):
self.canvasContext.scale(x,y)
def setBackgroundColor(self, element, color):
DOM.setStyleAttribute(element, "backgroundColor", color)
def setCoordHeight(self, elem, height):
DOM.setElemAttribute(elem, "height", str(height))
def setCoordWidth(self, elem, width):
DOM.setElemAttribute(elem,"width", str(width))
def setStrokeStyle(self, gradient):
        if isinstance(gradient, Color):  # a Color: use its string form
            gradient = str(gradient)
        elif not isinstance(gradient, basestring):  # not a color string,
            gradient = gradient.getObject()  # so it's a gradient object
self.canvasContext.strokeStyle = cvt(gradient)
def setFillStyle(self, gradient):
        if isinstance(gradient, Color):  # a Color: use its string form
            gradient = str(gradient)
        elif not isinstance(gradient, basestring):  # not a color string,
            gradient = gradient.getObject()  # so it's a gradient object
self.canvasContext.fillStyle = cvt(gradient)
def setGlobalAlpha(self, alpha):
self.canvasContext.globalAlpha = alpha
def setGlobalCompositeOperation(self, globalCompositeOperation):
self.canvasContext.globalCompositeOperation = cvt(globalCompositeOperation)
def setLineCap(self, lineCap):
self.canvasContext.lineCap = cvt(lineCap)
def setLineJoin(self, lineJoin):
self.canvasContext.lineJoin = cvt(lineJoin)
def setLineWidth(self, width):
self.canvasContext.lineWidth = width
def setMiterLimit(self, miterLimit):
self.canvasContext.miterLimit = miterLimit
def setPixelHeight(self, elem, height):
DOM.setStyleAttribute(elem, "height", "%dpx" % height)
def setPixelWidth(self, elem, width):
DOM.setStyleAttribute(elem, "width", "%dpx" % width)
def stroke(self):
self.canvasContext.stroke()
def strokeRect(self, startX, startY, width, height):
self.canvasContext.strokeRect(startX,startY,width,height)
def transform(self, m11, m12, m21, m22, dx, dy):
self.canvasContext.transform(m11,m12,m21,m22,dx,dy)
def translate(self, x, y):
self.canvasContext.translate(x,y)
def clearRect(self, startX, startY, width, height):
self.canvasContext.clearRect(startX,startY,width,height)
def setCanvasContext(self, ctx):
self.canvasContext = ctx
|
Hasimir/pyjs
|
pyjswidgets/pyjamas/Canvas/GWTCanvasImplDefault.py
|
Python
|
apache-2.0
| 6,520
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
import urllib
import json
import datetime
import time
import re
import urlparse
import hashlib
from pymongo import MongoClient
from bson.objectid import ObjectId
from pymongo import ASCENDING, DESCENDING
from bson.code import Code
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
client = MongoClient('10.0.0.31', 57017)
db = client.ICCv1
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
elif isinstance(obj, object):
return str(obj)
else:
return json.JSONEncoder.default(self, obj)
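# Usage sketch (illustrative):
#   json.dumps({'t': datetime.datetime(2014, 1, 1)}, cls=ComplexEncoder)
#   -> '{"t": "2014-01-01 00:00:00"}'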
def get_project_id_by_host(http_host):
rst = db.iWeixin_rsh_service_project.find_one({'serive_http_host':{'$regex':http_host,'$options':'i'},'__REMOVED__':False})
print rst
if rst == None:
print "Please set http_host in icc dashboard."
return None
else:
return unicode(rst[u'project_id'])
class Project():
def __init__(self,project_id):
self.project_id = project_id
def get_weixin_config(self):
print "get_weixin_config query"
print {'project_id':self.project_id,'alias':'iWeixin_application','__REMOVED__':False}
weixin_application = db.idatabase_collections.find_one({'project_id':self.project_id,'alias':'iWeixin_application','__REMOVED__':False})
print "weixin_application"
print weixin_application
collection_name = "idatabase_collection_%s"%(str(weixin_application[u'_id']),)
print "collection_name is"
print collection_name
return db[collection_name].find_one({'is_product':True,'__REMOVED__':False})
def update_user_info(self,datas):
weixin_application = db.idatabase_collections.find_one({'project_id':self.project_id,'alias':'iWeixin_user','__REMOVED__':False})
collection_name = "idatabase_collection_%s"%(str(weixin_application[u'_id']),)
collection = db[collection_name]
if datas.has_key('subscribe'):
            datas['subscribe'] = bool(datas['subscribe'])
user_info = collection.find_one({'openid':datas['openid'],'__REMOVED__':False})
print user_info
print datas
if user_info==None:
            datas['__REMOVED__'] = False
now = datetime.datetime.utcnow()
datas['__CREATE_TIME__'] = now
datas['__MODIFY_TIME__'] = now
print collection.insert(datas)
else:
print collection.update({'_id':user_info['_id']},{'$set':datas})
return True
class AuthorizeHandler(tornado.web.RequestHandler):
def get(self):
try:
project_id = self.get_argument('project_id',None)
if project_id==None:
project_id = get_project_id_by_host(self.request.host)
p = Project(project_id)
weixin_config = p.get_weixin_config()
print weixin_config
redirect = self.get_argument('redirect',None)
scope = self.get_argument('scope','snsapi_userinfo')#snsapi_base|snsapi_userinfo
if redirect==None:
return self.write("""Please set the redirect parameter.""")
append = self.get_secure_cookie('__WEIXIN_OAUTH_INFO__')
        append = None  # NOTE: the cookie shortcut above is disabled; always re-authorize
if append!=None:
redirect = urllib.unquote(redirect)
url = unicode("%s%s%s"%(redirect,redirect.find('?')>0 and '&' or '?',append)).encode('utf-8')
self.redirect(url)
else:
redirect_uri = self.redirect_uri(redirect)
url = "https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=code&scope=%s&state=%s#wechat_redirect"%(weixin_config[u'appid'], redirect_uri,scope,scope)
self.redirect(url)
except Exception,e:
return self.write(str(e))
def redirect_uri(self, redirect):
redirect = unicode(redirect).encode('utf-8')
if redirect.find("%")==-1:
redirect = urllib.quote(redirect)
return urllib.quote("http://%s/weixin/sns/callback?redirect=%s"%(self.request.host,redirect))
class AccessTokenHandler(tornado.web.RequestHandler):
def sign(self, openid, secret_key, timestamp):
return hashlib.sha1("%s|%s|%s"%(openid, secret_key, timestamp)).hexdigest()
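        # Example (illustrative): sign('OPENID', 'KEY', 1400000000) returns
        # the sha1 hex digest (40 chars) of 'OPENID|KEY|1400000000'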
@tornado.web.asynchronous
def get(self):
self.client = tornado.httpclient.AsyncHTTPClient()
self.redirect = ''
self.access_token = {}
self.scope = ''
self.secret_key = ''
project_id = self.get_argument('project_id',None)
if project_id==None:
project_id = get_project_id_by_host(self.request.host)
code = self.get_argument('code',None)
state = self.get_argument('state')
self.scope = state
self.redirect = urllib.unquote(self.get_argument('redirect'))
if code==None:
return self.write("""User cancel the authorization.""")
p = Project(project_id)
weixin_config = p.get_weixin_config()
appid = weixin_config[u'appid']
secret = weixin_config[u'secret']
self.secret_key = weixin_config[u'secretKey']
print "weixin_config"
print weixin_config
self.client.fetch("https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&scope=%s&grant_type=authorization_code"%(appid,secret,code,self.scope),callback=self.on_authorize,validate_cert=False)
@tornado.web.asynchronous
def on_authorize(self, response):
body = json.loads(response.body)
if body.has_key('errmsg') and body['errmsg']!='ok':
self.write("on_authorize error:%s"%(response.body,))
self.finish()
else:
self.access_token = body
access_token = unicode(body[u'access_token'])
expires_in = unicode(body[u'expires_in'])
refresh_token = unicode(body[u'refresh_token'])
openid = unicode(body[u'openid'])
scope = unicode(body[u'scope'])
print "scope"
print scope
print self.scope
if scope=='snsapi_base':
project_id = self.get_argument('project_id',None)
if project_id==None:
project_id = get_project_id_by_host(self.request.host)
p = Project(project_id)
datas = {}
datas['openid'] = openid
datas['access_token'] = body
p.update_user_info(datas)
self.redirect_append_params({})
else:
self.client.fetch("https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s&lang=zh_CN"%(access_token,openid),callback=self.on_getuserinfo,validate_cert=False)
def on_getuserinfo(self,response):
user_info = json.loads(response.body)
if user_info.has_key('errmsg') and user_info['errmsg']!='ok':
self.write("on_getuserinfo error:%s"%(response.body,))
self.finish()
else:
            # write the user info into the database
project_id = self.get_argument('project_id',None)
if project_id==None:
project_id = get_project_id_by_host(self.request.host)
p = Project(project_id)
user_info['access_token'] = self.access_token
p.update_user_info(user_info)
self.redirect_append_params(user_info)
def redirect_append_params(self,datas={}):
try:
append = ''
if self.access_token.has_key('openid'):
timestamp = int(time.time())
sign = self.sign(self.access_token['openid'],self.secret_key,timestamp)
base = {"FromUserName": self.access_token['openid'],"scope":self.access_token['scope'],'timestamp':timestamp,'signkey':sign}
params = dict(base, **datas)
                new_params = {}
for k,v in params.items():
if isinstance(v, basestring):
new_params[k] = v.encode('utf-8')
elif isinstance(v,int):
new_params[k] = v
try:
if 'openid' in new_params:
del new_params['openid']
if 'province' in new_params:
del new_params['province']
if 'city' in new_params:
del new_params['city']
if 'language' in new_params:
del new_params['language']
except Exception,e:
pass
append = urllib.urlencode(new_params)
self.set_secure_cookie('__WEIXIN_OAUTH_INFO__',append,30)
            else:
                self.write("""self.access_token is undefined""")
                self.finish()
                return
url = unicode("%s%s%s"%(self.redirect,self.redirect.find('?')>0 and '&' or '?',append)).encode('utf-8')
self.write("""<html><head><meta http-equiv="refresh" content="0; url=%s" /></head><body></body></html>"""%(url,))
self.finish()
except Exception,e:
print e
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application([
(r"/weixin/sns/index", AuthorizeHandler),
(r"/weixin/sns/callback", AccessTokenHandler)
],autoreload=True,cookie_secret="72DDE445B09542BF0BC2F3E2E172EE6B")
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
icatholic/icc
|
scripts/weixin/service.py
|
Python
|
bsd-3-clause
| 10,180
|
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import mozdns
from mozdns.domain.models import Domain
from mozdns.view.models import View
from mozdns.mixins import ObjectUrlMixin, DisplayMixin
from mozdns.validation import validate_first_label, validate_name
from mozdns.validation import validate_ttl
class LabelDomainMixin(models.Model):
"""
This class provides common functionality that many DNS record
classes share. This includes a foreign key to the ``domain`` table
and a ``label`` CharField.
    If you plan on using the ``unique_together`` constraint on a Model
    that inherits from ``LabelDomainMixin``, you must include ``domain``
    and ``label`` explicitly if you need them to be part of the constraint.
    All common records have a ``fqdn`` field. This field is updated
    every time the object is saved::

        fqdn = label + '.' + domain.name

    or, if label == '':

        fqdn = domain.name
This field makes searching for records much easier. Instead of
looking at ``obj.label`` together with ``obj.domain.name``, you can
just search the ``obj.fqdn`` field.
"the total number of octets that represent a name (i.e., the sum of
all label octets and label lengths) is limited to 255" - RFC 4471
"""
domain = models.ForeignKey(Domain, null=False, help_text="FQDN of the "
"domain after the short hostname. "
"(Ex: <i>Vlan</i>.<i>DC</i>.mozilla.com)")
# "The length of any one label is limited to between 1 and 63 octets."
    # -- RFC 2181
label = models.CharField(max_length=63, blank=True, null=True,
validators=[validate_first_label],
help_text="Short name of the fqdn")
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_name], db_index=True)
class Meta:
abstract = True
class ViewMixin(models.Model):
    def validate_views(instance, views):
        instance.clean_views(views)
views = models.ManyToManyField(
View, blank=True, validators=[validate_views]
)
class Meta:
abstract = True
def clean_views(self, views):
"""cleaned_data is the data that is going to be called with for
updating an existing or creating a new object. Classes should implement
this function according to their specific needs.
"""
for view in views:
if hasattr(self, 'domain'):
self.check_no_ns_soa_condition(self.domain, view=view)
if hasattr(self, 'reverse_domain'):
self.check_no_ns_soa_condition(self.reverse_domain, view=view)
    def check_no_ns_soa_condition(self, domain, view=None):
        if domain.soa:
            fail = False
            root_domain = domain.soa.root_domain
            if root_domain:
                if not root_domain.nameserver_set.exists():
                    fail = True
                elif (view and not root_domain.nameserver_set.filter(
                        views=view).exists()):
                    fail = True
            if fail:
                raise ValidationError(
                    "The zone you are trying to assign this record to "
                    "does not have an NS record, and thus cannot support "
                    "other records.")
class MozdnsRecord(ViewMixin, DisplayMixin, ObjectUrlMixin):
ttl = models.PositiveIntegerField(default=3600, blank=True, null=True,
validators=[validate_ttl],
help_text="Time to Live of this record")
description = models.CharField(max_length=1000, blank=True, null=True,
help_text="A description of this record.")
# fqdn = label + domain.name <--- see set_fqdn
def __str__(self):
self.set_fqdn()
return self.bind_render_record()
def __repr__(self):
return "<{0} '{1}'>".format(self.rdtype, str(self))
class Meta:
abstract = True
@classmethod
def get_api_fields(cls):
"""
The purpose of this is to help the API decide which fields to expose
        to the user when they are creating and updating an Object. This
        function should be implemented in inheriting models and overridden to
provide additional fields. Tastypie ignores any relational fields on
the model. See the ModelResource definitions for view and domain
fields.
"""
return ['fqdn', 'ttl', 'description', 'views']
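    # Hypothetical illustration (not part of the original code): a record
    # type exposing an extra field would override this along these lines:
    #     @classmethod
    #     def get_api_fields(cls):
    #         return super(TXT, cls).get_api_fields() + ['txt_data']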
def clean(self):
# The Nameserver and subclasses of BaseAddressRecord do not call this
# function
self.set_fqdn()
self.check_TLD_condition()
self.check_no_ns_soa_condition(self.domain)
self.check_for_delegation()
if self.rdtype != 'CNAME':
self.check_for_cname()
def delete(self, *args, **kwargs):
if self.domain.soa:
self.domain.soa.schedule_rebuild()
from mozdns.utils import prune_tree
call_prune_tree = kwargs.pop('call_prune_tree', True)
objs_domain = self.domain
super(MozdnsRecord, self).delete(*args, **kwargs)
if call_prune_tree:
prune_tree(objs_domain)
def save(self, *args, **kwargs):
self.full_clean()
if self.pk:
# We need to get the domain from the db. If it's not our current
# domain, call prune_tree on the domain in the db later.
db_domain = self.__class__.objects.get(pk=self.pk).domain
if self.domain == db_domain:
db_domain = None
else:
db_domain = None
no_build = kwargs.pop("no_build", False)
super(MozdnsRecord, self).save(*args, **kwargs)
if no_build:
pass
else:
# Mark the soa
if self.domain.soa:
self.domain.soa.schedule_rebuild()
if db_domain:
from mozdns.utils import prune_tree
prune_tree(db_domain)
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label,
self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
"""
"If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = mozdns.cname.models.CNAME
if hasattr(self, 'label'):
if CNAME.objects.filter(domain=self.domain,
label=self.label).exists():
raise ValidationError("A CNAME with this name already exists.")
else:
if CNAME.objects.filter(label='', domain=self.domain).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""
If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
try:
if not self.domain.delegated:
return
except ObjectDoesNotExist:
return
if not self.pk: # We don't exist yet.
raise ValidationError("No objects can be created in the {0}"
"domain. It is delegated."
.format(self.domain.name))
def check_TLD_condition(self):
domain = Domain.objects.filter(name=self.fqdn)
if not domain:
return
if self.label == '' and domain[0] == self.domain:
return # This is allowed
else:
raise ValidationError("You cannot create an record that points "
"to the top level of another domain.")
|
rtucker-mozilla/mozilla_inventory
|
mozdns/models.py
|
Python
|
bsd-3-clause
| 8,260
|
import time
from pulsar import HttpException
from pulsar.apps import ws
from pulsar.apps.data import PubSubClient, create_store
from pulsar.utils.system import json
from pulsar.utils.string import random_string
def home(request):
from django.shortcuts import render_to_response
from django.template import RequestContext
return render_to_response('home.html', {
'HOST': request.get_host()
}, RequestContext(request))
class ChatClient(PubSubClient):
def __init__(self, websocket):
self.joined = time.time()
self.websocket = websocket
self.websocket._chat_client = self
def __call__(self, channel, message):
# The message is an encoded JSON string
self.websocket.write(message, opcode=1)
class Chat(ws.WS):
''':class:`.WS` handler managing the chat application.'''
_store = None
_pubsub = None
_client = None
def get_pubsub(self, websocket):
'''Create the pubsub handler if not already available'''
if not self._store:
cfg = websocket.cfg
self._store = create_store(cfg.data_store)
self._client = self._store.client()
self._pubsub = self._store.pubsub()
webchat = '%s:webchat' % cfg.exc_id
chatuser = '%s:chatuser' % cfg.exc_id
yield from self._pubsub.subscribe(webchat, chatuser)
return self._pubsub
def on_open(self, websocket):
'''A new websocket connection is established.
Add it to the set of clients listening for messages.
'''
pubsub = yield from self.get_pubsub(websocket)
pubsub.add_client(ChatClient(websocket))
user, _ = self.user(websocket)
users_key = 'webchatusers:%s' % websocket.cfg.exc_id
# add counter to users
registered = yield from self._client.hincrby(users_key, user, 1)
if registered == 1:
yield from self.publish(websocket, 'chatuser', 'joined')
def on_close(self, websocket):
'''Leave the chat room
'''
user, _ = self.user(websocket)
users_key = 'webchatusers:%s' % websocket.cfg.exc_id
registered = yield from self._client.hincrby(users_key, user, -1)
pubsub = yield from self.get_pubsub(websocket)
pubsub.remove_client(websocket._chat_client)
if not registered:
yield from self.publish(websocket, 'chatuser', 'gone')
if registered <= 0:
yield from self._client.hdel(users_key, user)
def on_message(self, websocket, msg):
'''When a new message arrives, it publishes to all listening clients.
'''
if msg:
            lines = []
            for line in msg.split('\n'):
                line = line.strip()
                if line:
                    lines.append(line)
            msg = ' '.join(lines)
if msg:
return self.publish(websocket, 'webchat', msg)
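    # Hypothetical illustration of the normalisation above: an incoming
    # '  hello\n\n world ' is collapsed to 'hello world' before publishing.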
def user(self, websocket):
user = websocket.handshake.get('django.user')
if user.is_authenticated():
return user.username, True
else:
session = websocket.handshake.get('django.session')
user = session.get('chatuser')
if not user:
user = 'an_%s' % random_string(length=6).lower()
session['chatuser'] = user
return user, False
def publish(self, websocket, channel, message=''):
user, authenticated = self.user(websocket)
msg = {'message': message,
'user': user,
'authenticated': authenticated,
'channel': channel}
channel = '%s:%s' % (websocket.cfg.exc_id, channel)
return self._pubsub.publish(channel, json.dumps(msg))
class middleware(object):
'''Django middleware for serving the Chat websocket.'''
def __init__(self):
self._web_socket = ws.WebSocket('/message', Chat())
def process_request(self, request):
from django.http import HttpResponse
environ = request.META
environ['django.user'] = request.user
environ['django.session'] = request.session
try:
response = self._web_socket(environ)
except HttpException as e:
return HttpResponse(status=e.status)
if response is not None:
# we have a response, this is the websocket upgrade.
# Convert to django response
resp = HttpResponse(status=response.status_code,
content_type=response.content_type)
for header, value in response.headers:
resp[header] = value
return resp
else:
environ.pop('django.user')
environ.pop('django.session')
|
ymero/pulsar
|
examples/djchat/djchat/views.py
|
Python
|
bsd-3-clause
| 4,766
|
from django.shortcuts import render_to_response,get_object_or_404,render
from django.template import Context, RequestContext,loader
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from models import Auth
from forms import LoginForm
authenticated = 0
def index(request):
    global authenticated
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            # NOTE: credentials are stored and compared in plaintext against
            # the Auth row with pk=1.
            details = Auth.objects.create(
                username=form.cleaned_data['username'],
                passwd=form.cleaned_data['password'],
            )
            dbdet = Auth.objects.get(pk=1)
            if dbdet.username == details.username and dbdet.passwd == details.passwd:
                authenticated = 1
                return HttpResponseRedirect('welcome/')
            else:
                return HttpResponse("Sorry! Wrong call")
    # GET request or invalid form: show a fresh login form
    authenticated = 0
    form = LoginForm()
    context = RequestContext(request)
    return render_to_response('login_page.html', {'form': form}, context)
def welcome(request):
t=loader.get_template('welcome.html')
c= RequestContext(request)
return HttpResponse(t.render(c))
def if_authenticated():
return authenticated
# Create your views here.
|
aswinm/libman
|
login/views.py
|
Python
|
gpl-2.0
| 1,296
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import subprocess
import stations
windows = subprocess.check_output(
['tmux', 'list-windows', '-F', '#{window_name}']).strip().decode().split()
count = 0
for key in stations.city.keys():
if key in windows:
print(key)
count += 1
exit(count)
#print(windows)
|
endlisnis/weather-records
|
activeWindowCount.py
|
Python
|
gpl-3.0
| 325
|
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import requests
import six.moves.urllib.parse as urlparse
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OpenStackApiException(Exception):
def __init__(self, message=None, response=None):
self.response = response
if not message:
message = 'Unspecified error'
if response:
message = _('%(message)s\nStatus Code: %(_status)s\n'
'Body: %(_body)s') % {'_status': response.status_code,
'_body': response.text}
super(OpenStackApiException, self).__init__(message)
class OpenStackApiAuthenticationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authentication error")
super(OpenStackApiAuthenticationException, self).__init__(message,
response)
class OpenStackApiAuthorizationException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Authorization error")
super(OpenStackApiAuthorizationException, self).__init__(message,
response)
class OpenStackApiNotFoundException(OpenStackApiException):
def __init__(self, response=None, message=None):
if not message:
message = _("Item not found")
super(OpenStackApiNotFoundException, self).__init__(message, response)
class TestOpenStackClient(object):
"""Simple OpenStack API Client.
This is a really basic OpenStack API client that is under our control,
so we can make changes / insert hooks for testing
"""
def __init__(self, auth_user, auth_key, auth_uri):
super(TestOpenStackClient, self).__init__()
self.auth_result = None
self.auth_user = auth_user
self.auth_key = auth_key
self.auth_uri = auth_uri
# default project_id
self.project_id = 'openstack'
def request(self, url, method='GET', body=None, headers=None,
ssl_verify=True, stream=False):
_headers = {'Content-Type': 'application/json'}
_headers.update(headers or {})
parsed_url = urlparse.urlparse(url)
port = parsed_url.port
hostname = parsed_url.hostname
scheme = parsed_url.scheme
if netaddr.valid_ipv6(hostname):
hostname = "[%s]" % hostname
relative_url = parsed_url.path
if parsed_url.query:
relative_url = relative_url + "?" + parsed_url.query
LOG.info(_("Doing %(method)s on %(relative_url)s"),
{'method': method, 'relative_url': relative_url})
if body:
LOG.info(_("Body: %s") % body)
if port:
_url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
else:
_url = "%s://%s%s" % (scheme, hostname, relative_url)
response = requests.request(method, _url, data=body, headers=_headers,
verify=ssl_verify, stream=stream)
return response
def _authenticate(self):
if self.auth_result:
return self.auth_result
auth_uri = self.auth_uri
headers = {'X-Auth-User': self.auth_user,
'X-Auth-Key': self.auth_key,
'X-Auth-Project-Id': self.project_id}
response = self.request(auth_uri,
headers=headers)
http_status = response.status_code
LOG.debug("%(auth_uri)s => code %(http_status)s",
{'auth_uri': auth_uri, 'http_status': http_status})
if http_status == 401:
raise OpenStackApiAuthenticationException(response=response)
self.auth_result = response.headers
return self.auth_result
def api_request(self, relative_uri, check_response_status=None, **kwargs):
auth_result = self._authenticate()
# NOTE(justinsb): httplib 'helpfully' converts headers to lower case
base_uri = auth_result['x-server-management-url']
full_uri = '%s/%s' % (base_uri, relative_uri)
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
response = self.request(full_uri, **kwargs)
http_status = response.status_code
LOG.debug("%(relative_uri)s => code %(http_status)s",
{'relative_uri': relative_uri, 'http_status': http_status})
if check_response_status:
if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message=_("Unexpected status code"),
response=response)
return response
def _decode_json(self, response):
body = response.text
LOG.debug("Decoding JSON: %s" % (body))
if body:
return jsonutils.loads(body)
else:
return ""
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_post(self, relative_uri, body, **kwargs):
kwargs['method'] = 'POST'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_put(self, relative_uri, body, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
kwargs.setdefault('check_response_status', [200, 202, 204])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
def api_delete(self, relative_uri, **kwargs):
kwargs['method'] = 'DELETE'
kwargs.setdefault('check_response_status', [200, 202, 204])
return self.api_request(relative_uri, **kwargs)
def get_volume(self, volume_id):
return self.api_get('/volumes/%s' % volume_id)['volume']
def get_volumes(self, detail=True):
rel_url = '/volumes/detail' if detail else '/volumes'
return self.api_get(rel_url)['volumes']
def post_volume(self, volume):
return self.api_post('/volumes', volume)['volume']
def delete_volume(self, volume_id):
return self.api_delete('/volumes/%s' % volume_id)
def put_volume(self, volume_id, volume):
return self.api_put('/volumes/%s' % volume_id, volume)['volume']
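# Hypothetical usage sketch (credentials and endpoint are made up):
#     client = TestOpenStackClient('admin', 'secret', 'http://127.0.0.1:8776/auth')
#     new = client.post_volume({'volume': {'size': 1}})
#     status = client.get_volume(new['id'])['status']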
|
github-borat/cinder
|
cinder/tests/integrated/api/client.py
|
Python
|
apache-2.0
| 7,946
|
from .miscutils import *
from .timeutils import *
from .logutils import *
from .argsutils import *
from .error_message_utils import *
from .handleutils import * # This needs argsutils
from .routingkeys import *
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/allutils.py
|
Python
|
apache-2.0
| 213
|
#!/usr/bin/env python2.7
import os
import subprocess as sub
import time
wd = os.path.dirname(os.path.realpath(__file__))
print os.path.dirname(os.path.realpath(__file__))
cmd = os.path.join(wd, 'n_jobs.sh')
res = sub.check_output([cmd])
print int(res)
while int(sub.check_output([cmd])) >= 1:
    time.sleep(60)
|
jdbrice/rcpMaker
|
runner/wait_for_jobs.py
|
Python
|
mit
| 327
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Sphinx build configuration and documentation file writer."""
import unittest
from l2tdevtools import dependencies
from l2tdevtools.dependency_writers import sphinx_docs
from l2tdevtools.helpers import project
from tests import test_lib
class SphinxBuildConfigurationWriterTest(test_lib.BaseTestCase):
"""Tests the Sphinx build configuration file writer."""
def testInitialize(self):
"""Tests that the writer can be initialized."""
l2tdevtools_path = '/fake/l2tdevtools/'
project_definition = project.ProjectHelper(l2tdevtools_path)
dependencies_file = self._GetTestFilePath(['dependencies.ini'])
test_dependencies_file = self._GetTestFilePath(['test_dependencies.ini'])
dependency_helper = dependencies.DependencyHelper(
dependencies_file=dependencies_file,
test_dependencies_file=test_dependencies_file)
writer = sphinx_docs.SphinxBuildConfigurationWriter(
l2tdevtools_path, project_definition, dependency_helper)
self.assertIsNotNone(writer)
# TODO: Add test for the Write method.
class SphinxBuildRequirementsWriterTest(test_lib.BaseTestCase):
"""Tests the Sphinx build requirements file writer."""
def testInitialize(self):
"""Tests that the writer can be initialized."""
l2tdevtools_path = '/fake/l2tdevtools/'
project_definition = project.ProjectHelper(l2tdevtools_path)
dependencies_file = self._GetTestFilePath(['dependencies.ini'])
test_dependencies_file = self._GetTestFilePath(['test_dependencies.ini'])
dependency_helper = dependencies.DependencyHelper(
dependencies_file=dependencies_file,
test_dependencies_file=test_dependencies_file)
writer = sphinx_docs.SphinxBuildRequirementsWriter(
l2tdevtools_path, project_definition, dependency_helper)
self.assertIsNotNone(writer)
# TODO: Add test for the Write method.
if __name__ == '__main__':
unittest.main()
|
joachimmetz/l2tdevtools
|
tests/dependency_writers/sphinx_docs.py
|
Python
|
apache-2.0
| 1,974
|
# Copyright 2007, 2008, 2009, 2010 Manuel Arriaga
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from __future__ import with_statement
import os, user, pickle,re,stat, shutil,time,fcntl
import cfg
# CODE #####
re_filename = re.compile("^(.*?) ?(?:\[(.*)\])?(?:\\.\w*)?$")
assert(cfg.TIMESTAMP_TO_USE in ("ctime", "atime", "mtime"))
# Terminology. Given
#
# filename = 'document1[tag1,tag2].txt'
#
# we define
#
# purename = 'document1'
# docname = 'document1[tag1,tag2]'
# ext = 'txt'
########################
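# A hypothetical illustration (not part of the original module) of how
# re_filename splits a filename into the pieces named above:
assert re.match(re_filename, "document1[tag1,tag2].txt").groups() == \
    ("document1", "tag1,tag2")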
# This class is used to store the metadata for a doc together (i) its path and (ii) a flag
# which indicates whether the current metadata for this doc is more recent than that inscribed
# into its path (in which case, the metadata is contained in the cfg.pending_updates_filename file
# in the doc_dir containing this doc)
class Doc:
def __init__(self, path, docname, extension, timestamp=None, metadataMoreRecentThanFilename = False):
self.path = path.rstrip('/')
self.docname = docname # this is the filename MINUS the [dot+]extension; ie, it INCLUDES THE TAGS STRING
self.extension = extension
self.timestamp = timestamp
self.metadataMoreRecentThanFilename = metadataMoreRecentThanFilename
return
pass
class LockFile:
def __init__(self, fileObject, path):
self.fileObject = fileObject
self.path = path
return
pass
# returns a LockFile object (see above) of the lock file at lockfilepath or None
# (in case the wait for the lock times out). If exclusiveLock==False,
# tries to acquire a shared lock instead (suitably for concurrent reads
# but no write). timeout==None makes this function block (possibly forever)
# until the lock becomes available.
def acquire_lock(lockfilepath, timeout, exclusiveLock=True, interval_between_attempts_to_acquire = 0.2):
lock_file = open(lockfilepath, "w")
start_time = time.time()
while timeout == None or time.time() - start_time < timeout:
try: fcntl.flock(lock_file.fileno(), (fcntl.LOCK_EX if exclusiveLock else fcntl.LOCK_SH) | (fcntl.LOCK_NB if timeout != None else 0))
except IOError:
#print "failed to get lock, will wait..."
time.sleep(interval_between_attempts_to_acquire)
else:
#print "got lock!"
return LockFile(lock_file, lockfilepath)
pass
else:
#print "timed out, quitting!"
return None
pass
# arg is the LockFile object returned by acquire_lock(). Returns True if
# lock was released successfully, False otherwise
def release_lock(lockfile_object):
try: fcntl.flock(lockfile_object.fileObject.fileno(), fcntl.LOCK_UN)
except IOError:
#print "couldn't release lock"
retval = False
else:
#print "released lock"
retval = True
finally:
lockfile_object.fileObject.close()
try: os.unlink(lockfile_object.path)
except: pass
pass
return retval
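# A hypothetical usage sketch (not part of the original module): take a
# shared lock for a concurrent read and always release it afterwards.
def _example_shared_read(lockfilepath):
    lock = acquire_lock(lockfilepath, timeout=5, exclusiveLock=False)
    if lock is None:
        return False # timed out waiting for the lock
    try:
        pass # ... read the shared resource here ...
    finally:
        release_lock(lock)
    return True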
def acquireInternalOpsFileLock(exclusiveLock = True):
return acquire_lock(os.path.join(user.home, cfg.oyepa_internal_ops_lockfilename), cfg.TIMEOUT_ON_WAITING_FOR_LOCK, exclusiveLock)
def acquireTagCacheFileLock(docDir, exclusiveLock = True):
return acquire_lock(os.path.join(docDir,cfg.tag_cache_lockfilename), cfg.TIMEOUT_ON_WAITING_FOR_LOCK, exclusiveLock)
def acquirePendingUpdatesFileLock(docDir, exclusiveLock = True):
return acquire_lock(os.path.join(docDir,cfg.pending_updates_lockfilename), cfg.TIMEOUT_ON_WAITING_FOR_LOCK, exclusiveLock)
# this function reads from the on-disk configuration file which doc dirs we
# should be operating on. It is called when this module is initialized
# to set the global var doc_dirs (see EOF).
# [UPDATE!] Tibor Csogor [tibi@tiborius.net] contributed much of the code
# in this function, modifying it so that
#
# (i) entries in the config file ending with "/*" are understood as requests
# to watch that directory *and all directories underneath it*, recursing
# all the way down. Eg, a line "/home/user/project/*" will setup watches
# on /home/user/project *and all directories beneath it (including
# /home/user/project/abc/def/ghj).
# (ii) the tilde is now understood as referring to the user's home dir.
# So, you can simply add a dir inside your home dir by adding the line
# "~/project" to the config file.
def readDocDirList():
dirlist_path = os.path.join(user.home, cfg.FILENAME_LISTING_DOC_DIRS)
new_doc_dirs = set()
if not os.path.exists(dirlist_path): return [] # nothing for us to do here
with open(dirlist_path, "r") as f:
dirLines = f.readlines()
pass
raw_doc_dirs = set()
for i in dirLines:
i = i.strip()
        if i == '/' or i == '/*':
            print "WARNING: ignoring request to watch '/'!"
            continue
i = i.rstrip('/')
if len(i) == 0:
continue
i = os.path.expanduser(i)
if not i.startswith('/'):
i = os.path.join(user.home, i)
pass
raw_doc_dirs.add(i)
pass
new_doc_dirs = set()
for pth in raw_doc_dirs:
d = pth
if pth.endswith('*'):
d = pth[:-2] # we remove TWO characters, '/*'
pass
if not os.path.isdir(d):
print ("WARNING: ignoring request to watch '%s': not a directory!"% d)
continue
if pth.endswith('*'):
            # NOTE: os.path.walk is NOT the same as os.walk(). The 'dirVisitor'
            # function (defined below) merely .add()s each (sub)dir it finds to
            # the new_doc_dirs set, which os.path.walk passes to it as its
            # first argument.
            os.path.walk(d, dirVisitor, new_doc_dirs)
else:
new_doc_dirs.add(d)
pass
pass
return new_doc_dirs
def dirVisitor(new_doc_dirs, dirname, names): # helper function used by readDocDirList(); see above
new_doc_dirs.add(dirname)
return
# This function provides basic exclusion rules for filenames
def should_skip(filename):
    return filename[0] in ('.', '#') or filename[-1] == '~' or filename in cfg.FILENAMES_TO_IGNORE
def validDocName(s):
return len(s) > 0 and not ("[" in s or "]" in s or "/" in s)
def validTag(s):
return len(s) > 0 and not ("," in s or "[" in s or "]" in s or "/" in s)
# Allows the GUI module to find out which doc_dirs we are operating on.
# NOTE: simply accessing "doc_dirs" (after "from fslayer import *") IS
# BAD, since it will return the list as it was at the time of the import
# statement.
def getDocDirs(): return doc_dirs
# returns a triplet containing (purename, list of tags, metadata is more recent
# than that in filename).
# Returns (None, None, None) in case of an error (arg outside of any doc_dir).
def getCurrentPureNameAndTagsForDoc(path):
doc_dir, arg_filename = \
os.path.split(os.path.abspath(path.rstrip('/')))
if doc_dir not in doc_dirs: return None,None,None
updates_dic = read_pending_updates(doc_dir)
current_virtual_filename = \
updates_dic[arg_filename] if arg_filename in updates_dic else arg_filename
purename, tags = split_purename_and_tags_from_filename(current_virtual_filename)
return purename, tags, current_virtual_filename != arg_filename
def split_docname_ext(filename):
filename = filename.lower()
docname = filename
ext = None
if '.' in filename:
dot = filename.rindex('.')
if dot+1 < len(filename) and '[' not in filename[dot:] and ']' not in filename[dot:]:
ext = filename[dot+1:]
docname = filename[:dot]
pass
pass
return docname, ext
# Argument can be either a filename or a docname (since we
# accept filenames without extension, a docname is always
# an acceptable filename).
# Returns (purename, list of tags).
#
# If the second, optional arg is set to True, then words (len>=3) found in
# the purename will also be included in the list of 'tags' we return.
#
# NOTE! It is important that this function only includes words in purename as tags
# when called from runQuery and rebuildTagCache. The rest of the code should only get
# the "real" tags (eg, the renameTag function).
def split_purename_and_tags_from_filename(filename, includeWordsInPurenameAsTags = False):
tags = []
purename, tag_str = re.match(re_filename, filename).groups()
if tag_str != None: tags = [tag.strip() for tag in tag_str.lower().split(",")]
if includeWordsInPurenameAsTags:
fake_tags_from_purename = purename.lower().split()
fake_tags_from_purename = map(lambda i: i.strip(':').strip(',').strip('-').strip('!').strip('(').strip(')'), fake_tags_from_purename)
fake_tags_from_purename = filter(lambda i: len(i) > 2 and i.isalpha() and i.lower() != "and", fake_tags_from_purename)
tags.extend(set(fake_tags_from_purename).difference(tags)) # make sure we don't add duplicates
pass
return purename, tags
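# A hypothetical illustration (not part of the original module) of the
# split performed above:
assert split_purename_and_tags_from_filename("notes[work, todo].txt") == \
    ("notes", ["work", "todo"])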
def read_tag_cache(doc_dir, mustGetLock=True):
tags_count = {}
# by default we get a shared ("concurrent reads") lock; however, when called
# from inside update_tag_cache() we don't get any (since that function already
# has a write lock)
if mustGetLock: lock_file = acquireTagCacheFileLock(doc_dir, exclusiveLock = False)
try:
filepath = os.path.join(doc_dir, cfg.tag_cache_filename)
if os.path.exists(filepath):
with open(filepath, "rb") as tag_cache_file:
tags_count = pickle.load(tag_cache_file)
pass
pass
pass
finally:
if mustGetLock: release_lock(lock_file)
pass
return tags_count
def update_tag_cache(doc_dir, origTags, newTags):
#print "doc_Dir: " + str(doc_dir) + "; origtags: " + str(origTags) + "; newtags: " + str(newTags)
if origTags == newTags: return # nothing to be done here
# get an exclusive ("write") lock
lock_file = acquireTagCacheFileLock(doc_dir)
try:
# read tags_count for this dir from the disk
tags_count = read_tag_cache(doc_dir, mustGetLock=False)
# update the tags_count
for tag in set(newTags).difference(origTags):
if tag in tags_count: tags_count[tag] += 1
else: tags_count[tag] = 1
pass
for tag in set(origTags).difference(newTags):
if tag in tags_count:
tags_count[tag] -= 1
if tags_count[tag] <= 0: del tags_count[tag]
pass
pass
# write it to disk
with open(os.path.join(doc_dir, cfg.tag_cache_filename), "wb") as tag_cache_file:
pickle.dump(tags_count, tag_cache_file)
pass
pass
finally: release_lock(lock_file)
return
# Read the updates pending for the docs contained in the dir named in its arg.
def read_pending_updates(doc_dir):
updates_dic = {}
filepath = os.path.join(doc_dir, cfg.pending_updates_filename)
lock_file = acquirePendingUpdatesFileLock(doc_dir, exclusiveLock = False)
try:
if os.path.exists(filepath):
with open(filepath, "rb") as update_file:
updates_dic = pickle.load(update_file)
pass
pass
pass
finally: release_lock(lock_file)
return updates_dic
# Writes to disk the updates pending for the docs contained in the dir named in its arg
def write_pending_updates(doc_dir, updates_dic):
pending_updates_path = os.path.join(doc_dir, cfg.pending_updates_filename)
lock_file = acquirePendingUpdatesFileLock(doc_dir)
try:
with open(pending_updates_path, "wb") as pending_updates_file:
pickle.dump(updates_dic, pending_updates_file)
pass
pass
finally: release_lock(lock_file)
return
# puts together a filename from all the metadata provided in its args.
# Returns None if filename would be too long. Notice that first arg
# is a *purename*, not a docname.
def assemble_filename_from_metadata(purename, tags, extension):
new_filename = purename
if len(tags) > 0:
new_filename += '['
for t in tags: new_filename += t + ", "
new_filename = new_filename.rstrip(", ")
new_filename += ']'
pass
if extension != None and len(extension) > 0:
new_filename += '.' + extension
pass
return new_filename if len(new_filename) <= cfg.MAX_FILENAME_LEN else None
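# A hypothetical illustration (not part of the original module): the same
# metadata round-trips back into a filename, assuming cfg.MAX_FILENAME_LEN
# is large enough:
assert assemble_filename_from_metadata("notes", ["work", "todo"], "txt") == \
    "notes[work, todo].txt"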
# 'purename' and 'tags' are the NEW (pure)name and tags to be applied
# to this doc. in case of error returns an error message (string), otherwise None
def tagDoc(path, purename, newTags, origTags):
doc_dir, current_filename_in_fs = \
os.path.split(os.path.abspath(path.rstrip('/')))
assert(doc_dir in doc_dirs), "[oyepa BUG] tagDoc called with a path which does not point into a doc_dir!"
extension = split_docname_ext(current_filename_in_fs)[1]
new_filename = assemble_filename_from_metadata(purename, newTags, extension)
if new_filename == None: return "Trouble: filename would be too long"
updates_dic = read_pending_updates(doc_dir)
# this copy of the updates_dic is used to find out if new_filename is
# already reserved for _some other doc_ which is pending a rename
updates_dic_for_all_other_docs = updates_dic.copy()
if current_filename_in_fs in updates_dic_for_all_other_docs: del updates_dic_for_all_other_docs[current_filename_in_fs]
if (current_filename_in_fs != new_filename and os.path.exists(os.path.join(doc_dir, new_filename))) or \
(new_filename in updates_dic_for_all_other_docs.values()):
# 1st test means that a file/dir holding a doc OTHER THAN THE ONE WE ARE TAGGING already exists. Actually, this places a minor
# artificial constraint: this function will fail even if the file/dir which already exists is simply a PREVIOUS incarnation of another doc,
# which will be renamed soon. This shouldn't be a showstopper in the majority of cases, and prevents possible confusion when we
# perform the updates at the time the filemon shuts down (ie, we would have to be careful to first rename that other, already existing
# doc before renaming the one we are now tagging to its new name).
        # 2nd test means that this new_filename is already defined as the new
        # name for _some other_ doc pending a rename (hence the use of a copy
        # of the updates_dic from which the current_filename_in_fs entry has
        # been removed).
        # Can't do: the name is taken by some other doc (otherwise the
        # previous branch would have been run).
        return "Trouble: a name collision occurred!\nPath: %s" % os.path.join(doc_dir, new_filename)
elif current_filename_in_fs == new_filename:
if current_filename_in_fs in updates_dic: # this test IS necessary, because the GUI might call tagDoc without any changes to the metadata having occurred
del updates_dic[current_filename_in_fs] # doc's metadata has reverted to its previous form inscribed into its path
pass
pass
else: updates_dic[current_filename_in_fs] = new_filename
write_pending_updates(doc_dir, updates_dic)
update_tag_cache(doc_dir, origTags, newTags)
return
# returns a list of 'Doc' objects describing the matching docs. In the case
# of files without an extension, their Doc.extension will be None. In the case
# of dirs without an extension, their Doc.extension will be set to FAKE_EXTENSION_FOR_DIRS.
# (Both files as well as dirs with an extension will be treated in the same way.
# Calling code should not discriminate between them, either.) Code which calls this
# function should interpret all these possible values correctly.
def runQuery(keywords, extensions, dirs, listUntagged=False):
matches = []
if keywords != None: keywords = [kw.strip().lower() for kw in keywords]
if extensions != None: extensions = [e.strip().lower() for e in extensions]
if type(extensions) == list and len(extensions) == 0: extensions = None
if type(keywords) == list and len(keywords) == 0: keywords = None
if dirs == None or len(dirs) == 0: dirs = doc_dirs
    # the cmd line util, ds, might pass us relative paths. And in the tight
    # loop below, profiling showed that a call to os.path.join() added .5 secs
    # on my machine for each additional 10,000 docs.
    dirs = map(lambda d: os.path.abspath(d).rstrip('/'), dirs)
for doc_dir in dirs:
updates_dic = read_pending_updates(doc_dir)
filenames = set(os.listdir(doc_dir))
filenames.difference_update(updates_dic.keys())
filenames.update(updates_dic.values())
# this dic is used to retrieve the path to the actual file of
# docs which had their metadata updated (ie, the new metadata
        # lives in the file holding pending updates, while the path storing
# the doc reflects previous metadata)
rev_updates_dic = {}
for (old,recent) in updates_dic.items():
rev_updates_dic[recent]=os.path.join(doc_dir,old)
pass
for filename in filenames:
if should_skip(filename): continue
# if this is a 'recent' file (meaning no file with this filename
# actually exists in the filesystem, yet), then we need to get
# the real path for this doc: -
if filename in rev_updates_dic:
path = rev_updates_dic[filename] # this is correct, dict value really is a path (not a filename); see above
metadataMoreRecentThanFilename = True
else:
path = doc_dir + '/' + filename
metadataMoreRecentThanFilename = False
pass
docname, ext = split_docname_ext(filename)
try:
st = os.stat(path)
isdir = stat.S_ISDIR(st[stat.ST_MODE])
timestamp = getattr(st, "st_" + cfg.TIMESTAMP_TO_USE)
except OSError:
isdir = None
timestamp = None
pass
if ext == None and isdir: ext= cfg.FAKE_EXTENSION_FOR_DIRS # to directories without an extension (e.g., "album") we give this "artificial" extension
if (extensions == None or (ext != None and ext.lower() in extensions) or (cfg.extensionlessDirsAlwaysMatch and ext == cfg.FAKE_EXTENSION_FOR_DIRS)) and \
( (listUntagged and '[' not in docname) or (not listUntagged and keywords == None) or (not listUntagged and \
len(filter(lambda kw: kw in docname, keywords)) == len(keywords))):
matches.append(Doc(path=path, docname=docname, extension=ext, timestamp=timestamp, metadataMoreRecentThanFilename=metadataMoreRecentThanFilename))
pass
pass
pass
if cfg.SORT_RESULTS_BY == "timestamp": matches.sort(key= lambda d: d.timestamp, reverse=True)
elif cfg.SORT_RESULTS_BY == "name": matches.sort(key= lambda d: d.docname.lower())
else: assert False, "SORT_RESULTS_BY SET TO INVALID VALUE!"
return matches
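# Hypothetical illustration: runQuery(['report'], ['pdf'], None) returns Doc
# objects for every doc whose docname contains 'report' and whose extension
# is pdf, sorted according to cfg.SORT_RESULTS_BY.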
# Returns a set containing all tags
def getAllTags():
# no need for tag cache lock(s), we get one inside read_tag_cache
allTags = set()
for doc_dir in doc_dirs:
filepath = os.path.join(doc_dir, cfg.tag_cache_filename)
if not os.path.exists(filepath): rebuildTagCache(doc_dir)
tags_count = read_tag_cache(doc_dir)
allTags.update(tags_count.keys())
pass
return allTags
def removeTag(tag): renameTag(tag, None) # see renameTag...
# It is the caller's task to check whether this will cause two tags
# to be merged (ie, whether newTag already exists).
# if newTag is set to None, then the tag named in the first arg is removed (see removeTag implementation above)
# In case of error, an error message (string) is returned. Otherwise, you get back None.
def renameTag(oldTag, newTag):
if newTag != None: newTag = newTag.lower().strip()
for doc_dir in doc_dirs:
updates_dic = read_pending_updates(doc_dir)
filenames = set(os.listdir(doc_dir))
filenames.difference_update(updates_dic.keys())
filenames.update(updates_dic.values())
# this dic is used to retrieve the path to the actual file of
# docs which had their metadata updated (ie, the new metadata
        # lives in the file holding pending updates, while the path storing
# the doc reflects previous metadata)
rev_updates_dic = {}
for (old,recent) in updates_dic.items():
rev_updates_dic[recent]=os.path.join(doc_dir,old)
pass
for filename in filenames:
if should_skip(filename): continue
# if this is a 'recent' file (meaning no file with this filename
# actually exists in the filesystem, yet), then we need to get
# the real path for this doc: -
if filename in rev_updates_dic:
path = rev_updates_dic[filename]
else: path = os.path.join(doc_dir, filename)
purename, origTags = split_purename_and_tags_from_filename(filename)
if oldTag not in origTags: continue # nothing to do here, move along
newTags = []
for tag in origTags:
if tag == oldTag and newTag != None and newTag not in origTags: newTags.append(newTag)
elif tag != oldTag: newTags.append(tag)
pass
retval = tagDoc(path, purename, newTags, origTags)
if type(retval) == str: return retval # error, abort
pass
pass
return None
# Rebuilds the tag cache files in all doc-dirs. If arg is None,
# operates on all doc_dirs
def rebuildTagCache(arg=None):
if arg == None: dirs = doc_dirs
else: dirs = [arg]
for doc_dir in dirs:
lock_file = acquireTagCacheFileLock(doc_dir) # get write lock
try:
tags_count = {}
updates_dic = read_pending_updates(doc_dir)
filenames = set(os.listdir(doc_dir))
filenames.difference_update(updates_dic.keys())
filenames.update(updates_dic.values())
for filename in filenames:
if should_skip(filename): continue
tags = split_purename_and_tags_from_filename(filename, cfg.includeWordsInPurenameAsTags)[1]
for tag in tags:
if tag in tags_count: tags_count[tag] += 1
else: tags_count[tag] = 1
pass
pass
# write it to disk
with open(os.path.join(doc_dir, cfg.tag_cache_filename), "wb") as tag_cache_file:
pickle.dump(tags_count, tag_cache_file)
pass
pass
finally: release_lock(lock_file)
pass
return
# by default secures a "read"/shared lock; but it might also be called from
# code which already has a write/exclusive lock on the internal ops file, hence
# the arg
def readInternalOps(mustGetLock=True):
if mustGetLock: lock_file = acquireInternalOpsFileLock(exclusiveLock = False) # this one needs a share ("concurrent reads") lock
try:
internalOps = []
oyepa_internal_ops_filepath = \
os.path.join(user.home, cfg.oyepa_internal_ops_filename)
if os.path.exists(oyepa_internal_ops_filepath):
with open(oyepa_internal_ops_filepath, "rb") as f:
internalOps = pickle.load(f)
pass
pass
pass
finally:
if mustGetLock: release_lock(lock_file)
pass
return internalOps
# returns the time of our (meaning the "client"/GUI code, as opposed to the
# filemon) last modification to the internalOps file. Calling code should pass
# the mtime we return to waitOnInternalOpsCleared(); the latter
# function will call removeFromOyepaInternalOps if the filemon hasn't shown up
# on time to demonstrate it has learnt which paths are being manipulated through
# internal operations)
#
# Arg can be either a path or a list of paths
def addToOyepaInternalOps(argPath_s):
paths = []
if type(argPath_s) == str: paths.append(argPath_s) # important, never extend() a list of str with a str argument (individual chars end being inserted, each as an item of its own)
elif type(argPath_s) == list: paths.extend(argPath_s)
else: assert False, "Invalid arg passed to addToOyepaInternalOps()"
paths = [p.rstrip('/') for p in paths]
lock_file = acquireInternalOpsFileLock() # this one needs an exclusive ("write") lock
try:
internalOps = readInternalOps(mustGetLock=False)
internalOps.extend(paths)
oyepa_internal_ops_filepath = \
os.path.join(user.home, cfg.oyepa_internal_ops_filename)
with open(oyepa_internal_ops_filepath, "wb") as f:
pickle.dump(internalOps, f)
pass
pass
finally: release_lock(lock_file)
return os.stat(oyepa_internal_ops_filepath).st_mtime
def removeFromOyepaInternalOps(argPath_s):
paths = []
if type(argPath_s) == str: paths.append(argPath_s) # important, never extend() a list of str with a str argument (individual chars end being inserted, each as an item of its own)
elif type(argPath_s) == list: paths.extend(argPath_s)
else: assert False, "Invalid arg passed to removeFromOyepaInternalOps()"
paths = [p.rstrip('/') for p in paths]
lock_file = acquireInternalOpsFileLock() # this one needs an exclusive ("write") lock
try:
internalOps = readInternalOps(mustGetLock=False)
internalOps = list(set(internalOps).difference(paths)) # remove these paths from list (if present)
oyepa_internal_ops_filepath = \
os.path.join(user.home, cfg.oyepa_internal_ops_filename)
with open(oyepa_internal_ops_filepath, "wb") as f:
pickle.dump(internalOps, f)
pass
pass
finally: release_lock(lock_file)
return
# Waits until the filemon has ignored this internal operation
# or until we give up on waiting for it
# (based on TIMEOUT_ON_WAITING_FOR_FILEMON). If it times out,
# this function runs removeFromInternalOps() on paths listed
# in its first arg. Always returns None
def waitOnInternalOpsCleared(argPath_s, previous_mtime):
paths = []
if type(argPath_s) == str: paths.append(argPath_s) # important, never extend() a list of str with a str argument (individual chars end being inserted, each as an item of its own)
elif type(argPath_s) == list: paths.extend(argPath_s)
else: assert False, "Invalid arg passed to waitOnInternalOpsCleared()"
paths = [p.rstrip('/') for p in paths]
while True:
time.sleep(0.2)
if len(set(readInternalOps()).intersection(paths)) == 0:
break
if time.time() - previous_mtime >= cfg.TIMEOUT_ON_WAITING_FOR_FILEMON:
print "wait on internal ops cleared TIMING OUT "
removeFromOyepaInternalOps(argPath_s) # filemon doesn't seem to be reacting, "forget" about these paths ourselves
break
pass
return
# NOTE: this function tests whether 'path' is in the list of
# paths currently being manipulated by the oyepa GUI. If
# the answer is positive, then *it removes that path from
# the internalOps list* and returns True. Otherwise, returns
# False.
#
# This function is invoked only by the filemon to find out whether
# or not it should ignore an operation it spots. It is for the
# changes performed by this function to the internalOps file
# that waitOnInternalOpsCleared looks.
def isInternalOyepaOp(path):
path = path.rstrip('/')
internalOps = readInternalOps()
if path in internalOps:
print "ignoring path %s, is internalOp"%path
removeFromOyepaInternalOps(path)
pass
return path in internalOps
def copyDocTo(origPath, destPath):
mtime = addToOyepaInternalOps([origPath, destPath]) # tell filemon not to interfere (in this case, not to prompt the user to tag the new copy of this doc)
errorMsg = None
try:
if os.path.isdir(origPath): shutil.copytree(origPath, destPath)
else: shutil.copyfile(origPath, destPath)
pass
except Exception, e:
errorMsg = "Copying this doc failed.\nException: " + str(e)
finally: waitOnInternalOpsCleared([origPath, destPath], mtime)
# since we have told the filemon not to interfere with our filesystem operations,
# we must update the tags cache ourselves
add_tags_in_path_to_cache(destPath)
return errorMsg
def moveDocTo(origPath, destPath):
mtime = addToOyepaInternalOps([origPath,destPath]) # tell filemon not to interfere
errorMsg = None
try: os.rename(origPath, destPath)
except Exception, e:
errorMsg = "Moving this doc failed.\nException: " + str(e)
finally: waitOnInternalOpsCleared([origPath, destPath], mtime)
# since we have told the filemon not to interfere with our filesystem operations,
# we must update the tags cache ourselves
remove_tags_in_path_from_cache_and_filename_from_pending_updates(origPath)
add_tags_in_path_to_cache(destPath)
return errorMsg
def removeDoc(path):
print "removeDoc(path=%s)"%path
mtime = addToOyepaInternalOps(path) # tell filemon not to interfere
errorMsg = None
try:
if os.path.isdir(path): shutil.rmtree(path)
else: os.unlink(path)
pass
except Exception, e:
errorMsg = "Removing this doc failed.\nException: " + str(e)
finally: waitOnInternalOpsCleared(path, mtime)
# since we have told the filemon not to interfere with our filesystem operations,
# we must update the tags cache ourselves
remove_tags_in_path_from_cache_and_filename_from_pending_updates(path)
return errorMsg
def add_tags_in_path_to_cache(path):
doc_dir,filename = os.path.split(os.path.abspath(path.rstrip('/')))
if doc_dir not in doc_dirs: return # we simply ignore paths to files outside of the doc repository
tags = split_purename_and_tags_from_filename(filename, cfg.includeWordsInPurenameAsTags)[1]
if tags != None: update_tag_cache(doc_dir, [], tags)
return
def remove_tags_in_path_from_cache_and_filename_from_pending_updates(path):
print "remove_tags_in_path_from_cache_and_filename_from_pending_updates"
doc_dir,filename = os.path.split(os.path.abspath(path.rstrip('/')))
if doc_dir not in doc_dirs: return # we simply ignore paths to files outside of the doc repository
updates_dic = read_pending_updates(doc_dir)
if filename in updates_dic:
print "update pending for deleted path (%s->%s), will remove (i) tags embedded in update and (ii) update itself"%(path,updates_dic[filename])
orig_filename = filename
filename = updates_dic[filename]
del updates_dic[orig_filename]
write_pending_updates(doc_dir, updates_dic)
pass
origTags = split_purename_and_tags_from_filename(filename, cfg.includeWordsInPurenameAsTags)[1]
if origTags != None: update_tag_cache(doc_dir, origTags, [])
return
# MODULE TOP LEVEL CODE ########################################
doc_dirs = readDocDirList()
|
kisp/oyepa-git
|
fslayer.py
|
Python
|
gpl-2.0
| 32,224
|
import os
import sys
WIN = sys.platform == 'win32' or os.name == 'nt'
if not WIN:
def get_binpath(prefix):
return os.path.join(prefix, 'bin')
def get_exec_path(binpath, execbin):
return os.path.join(binpath, execbin)
def exec_bin(execbin, argv, env):
os.execve(execbin, argv, env)
else: # pragma: nocover
import subprocess
def get_binpath(prefix):
return os.path.join(prefix, 'Scripts')
def get_exec_path(binpath, execbin):
if '.exe' not in execbin:
return os.path.join(binpath, '{}.exe'.format(execbin))
else:
return os.path.join(binpath, execbin)
def exec_bin(execbin, argv, env):
exe = subprocess.Popen(
[execbin] + argv[1:],
env=env,
universal_newlines=True
)
exe.communicate()
sys.exit(exe.returncode)
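# Hypothetical usage sketch: resolve and exec an interpreter from a prefix.
#     binpath = get_binpath(sys.prefix)
#     exec_bin(get_exec_path(binpath, 'python'), ['python', '-V'], dict(os.environ))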
|
bertjwregeer/vrun
|
src/vrun/oscompat.py
|
Python
|
isc
| 884
|
#!/usr/bin/env python
from struct import pack
import bq78350
import smbus
import srec
import sys
import time
# Load SREC
data = srec.Segment(bq78350.data_srec_origin, bq78350.data_segment_size, \
pack('>B', 0xFF))
code = srec.Segment(bq78350.code_srec_origin, bq78350.code_segment_size * 4, \
pack('>L', bq78350.code_default_value))
linker = srec.Linker(data, code)
f = srec.Reader(sys.argv[1])
linker.link(f)
# Access device
with smbus.Bus() as bus:
bus.gpio_set(32)
time.sleep(1)
bus.gpio_set(0)
with bq78350.Device(bus) as dev:
print "Entering ROM Mode"
with dev.EnterROMMode() as prog:
# Start programming
print " Version ", prog.Version()
print "Erasing program space"
prog.EraseCode()
print " Checksum", '0x%08X' % prog.CodeChecksum(0)
print "Erasing data space"
prog.EraseData()
print " Checksum", '0x%04X' % prog.DataChecksum()
print "Writing out data"
prog.WriteData(data)
print " Checksum", '0x%04X' % prog.DataChecksum()
print "Writing out code"
prog.WriteCodeBytes(code)
print " Checksum", '0x%08X' % prog.CodeChecksum(0)
print "Leaving ROM Mode"
# Verify we booted up
print "Temperature:", dev.Temperature()
print "Voltage: ", dev.Voltage()
print "Rel SoC: ", dev.RelativeStateOfCharge()
print "Abs SoC: ", dev.AbsoluteStateOfCharge()
print "Rem Capcity:", dev.RemainingCapacity()
print "Capacity: ", dev.FullChargeCapacity()
|
taiwenko/python
|
bmutest/program.py
|
Python
|
mit
| 1,499
|
import sys
import logging
from jetcomcrawl.modes import categories, items, details
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
if len(sys.argv) < 2:
print('Please enter the mode you wish to run:\npython3 run.py [get_categories|get_items|get_details]')
sys.exit()
mode = sys.argv[1]
if mode == 'get_categories':
worker = categories.Worker()
worker.crawl()
elif mode == 'get_items':
worker = items.Worker()
worker.work()
elif mode == 'get_details':
worker = details.Worker()
worker.work()
|
tdickman/jetcom-crawl
|
jetcomcrawl/run.py
|
Python
|
mit
| 601
|
# -*- coding: utf-8 -*-
#
# DDT documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 21 23:00:01 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Specific for readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
docs_root = os.path.dirname(__file__)
sys.path.insert(0, os.path.split(docs_root)[0])
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
if not on_rtd:
extensions.append('sphinxcontrib.programoutput')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DDT'
# pylint: disable-msg=W0622
# - copyright is a builtin
copyright = u'2012, Carles Barrobés'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from ddt import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DDTdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DDT.tex', u'DDT Documentation',
u'Carles Barrobés', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ddt', u'DDT Documentation',
[u'Carles Barrobés'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DDT', u'DDT Documentation',
u'Carles Barrobés', 'DDT', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| domidimi/ddt | docs/conf.py | Python | mit | 8,103 |
import pkg_resources
from pyramid.static import static_view
def includeme(config):
"""Add static route support to the Configurator.
"""
config.add_directive('add_static_route', add_static_route)
def add_static_route(config, package, subdir, cache_max_age=3600,
**add_route_args):
"""Add a route and view to serve static files from a directory.
I create a catchall route that serves all URLs from a directory of static
files if the corresponding file exists. Subdirectories are also handled.
For example, the URL "/robots.txt" corresponds to file
"PACKAGE/SUBDIR/robots.txt", and "/images/header/logo.png"
corresponds to "PACKAGE/SUBDIR/images/header/logo.png". If the file
doesn't exist, the route won't match the URL, and Pyramid will continue to
the next route or traversal. The route name is 'static', which must not
conflict with your application's other routes.
    For example, ``config.add_static_route('myapp', 'static')`` serves URLs
    from the "static" directory in package "myapp".
Arguments:
* ``config``: a ``pyramid.config.Configurator`` instance.
* ``package``: the name of the Python package containing the static files.
* ``subdir``: the subdirectory in the package that contains the files.
This should be a relative directory with '/' separators regardless of
platform.
    * ``cache_max_age``: influences the ``Expires`` and ``Max-Age``
      response headers returned by the view (default is 3600 seconds, i.e.
      one hour).
* ``**add_route_args``: additional arguments to ``config.add_route``.
'name' defaults to "static" but can be overridden. (Every route in your
application must have a unique name.) 'pattern' and 'view' may not be
specified and will raise TypeError if they are.
"""
for bad_arg in ["pattern", "view"]:
if bad_arg in add_route_args:
            raise TypeError("keyword arg '%s' is not allowed" % bad_arg)
name = add_route_args.pop("name", "static")
pattern = "/*subpath"
asset = "%s:%s" % (package, subdir)
view = static_view(asset, cache_max_age)
custom_preds = add_route_args.pop("custom_predicates", [])
preds = [StaticViewPredicate(package, subdir)]
preds.extend(custom_preds)
config.add_route(name, pattern, custom_predicates=preds, **add_route_args)
config.add_view(view, route_name=name)
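#### Usage sketch
# A minimal, hypothetical application setup (the package name 'myapp' and its
# 'static' subdirectory are assumptions, not part of this module):
#
#   from pyramid.config import Configurator
#
#   config = Configurator()
#   config.include('akhet.static')              # registers add_static_route
#   config.add_static_route('myapp', 'static')
#
# With this in place, "/images/logo.png" is served from
# "myapp/static/images/logo.png" when that file exists; otherwise the route
# predicate below returns False and Pyramid falls through to later routes.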
#### Private stuff
class StaticViewPredicate(object):
def __init__(self, package, subdir):
self.package = package
self.subdir = subdir
def __call__(self, info, request):
subpath = info["match"]["subpath"]
#log.debug("subpath is %r", subpath)
if not subpath:
#log.debug("no subpath, returning false")
return False
parts = [self.subdir]
parts.extend(subpath)
resource_name = "/".join(parts)
#log.debug("package=%r, resource_name=%r", self.package, resource_name)
return pkg_resources.resource_exists(self.package, resource_name)
| hlwsmith/akhet | akhet/static.py | Python | mit | 2,988 |
#coding=utf-8
from django.contrib import admin
from biz.floating.models import Floating
class FloatingAdmin(admin.ModelAdmin):
    list_display = ("id", "ip", "status", "instance", "user")
admin.site.register(Floating, FloatingAdmin)
| zhanghui9700/eonboard | eoncloud_web/biz/floating/admin.py | Python | apache-2.0 | 241 |
from colab.plugins.views import ColabProxyView
class TaigaPluginProxyView(ColabProxyView):
app_label = 'colab_taiga'
diazo_theme_template = 'proxy/taiga.html'
add_remote_user = True
| mes-2016-1/colab-taiga-plugin | src/colab_taiga/views.py | Python | gpl-3.0 | 196 |
import contextlib
import unittest
import pytest
from cryptography import x509
import six
from ipapython.dn import DN, RDN, AVA
if six.PY3:
unicode = str
def cmp(a, b):
if a == b:
assert not a < b
assert not a > b
assert not a != b
assert a <= b
assert a >= b
return 0
elif a < b:
assert not a > b
assert a != b
assert a <= b
assert not a >= b
return -1
else:
assert a > b
assert a != b
assert not a <= b
assert a >= b
return 1
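# cmp() above mirrors the Python 2 built-in, returning -1, 0, or 1, while also
# asserting that all six rich comparisons are mutually consistent; e.g.
# cmp(AVA('cn', 'a'), AVA('cn', 'b')) is expected to return -1.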
pytestmark = pytest.mark.tier0
def expected_class(klass, component):
if klass is AVA:
if component == 'self':
return AVA
elif klass is RDN:
if component == 'self':
return RDN
elif component == 'AVA':
return AVA
elif klass is DN:
if component == 'self':
return DN
elif component == 'AVA':
return AVA
elif component == 'RDN':
return RDN
raise ValueError("class %s with component '%s' unknown" % (klass.__name__, component))
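# expected_class() maps a (container class, component label) pair to the class
# that component should have, e.g. expected_class(DN, 'RDN') is RDN. The test
# classes below use it to assert that indexing a DN yields RDN objects and
# indexing an RDN yields AVA objects.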
class TestAVA(unittest.TestCase):
def setUp(self):
self.attr1 = 'cn'
self.value1 = 'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.attr2 = 'ou'
self.value2 = 'People'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.attr3 = 'c'
self.value3 = 'US'
self.str_ava3 = '%s=%s' % (self.attr3, self.value3)
self.ava3 = AVA(self.attr3, self.value3)
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with attr,value pair
ava1 = AVA(self.attr1, self.value1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with "attr=value" string
ava1 = AVA(self.str_ava1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with tuple (attr, value)
ava1 = AVA((self.attr1, self.value1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with list [attr, value]
ava1 = AVA([self.attr1, self.value1])
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with no args should fail
with self.assertRaises(TypeError):
AVA()
# Create with more than 3 args should fail
with self.assertRaises(TypeError):
AVA(self.attr1, self.value1, self.attr1, self.attr1)
# Create with 1 arg which is not string should fail
with self.assertRaises(TypeError):
AVA(1)
# Create with malformed AVA string should fail
with self.assertRaises(ValueError):
AVA("cn")
# Create with non-string parameters, should convert
ava1 = AVA(1, self.value1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.attr, u'1')
ava1 = AVA((1, self.value1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.attr, u'1')
ava1 = AVA(self.attr1, 1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.value, u'1')
ava1 = AVA((self.attr1, 1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.value, u'1')
def test_indexing(self):
ava1 = AVA(self.ava1)
self.assertEqual(ava1[self.attr1], self.value1)
self.assertEqual(ava1[0], self.attr1)
self.assertEqual(ava1[1], self.value1)
with self.assertRaises(KeyError):
ava1['foo'] # pylint: disable=pointless-statement
with self.assertRaises(KeyError):
ava1[3] # pylint: disable=pointless-statement
def test_properties(self):
ava1 = AVA(self.ava1)
self.assertEqual(ava1.attr, self.attr1)
self.assertIsInstance(ava1.attr, unicode)
self.assertEqual(ava1.value, self.value1)
self.assertIsInstance(ava1.value, unicode)
def test_str(self):
ava1 = AVA(self.ava1)
self.assertEqual(str(ava1), self.str_ava1)
self.assertIsInstance(str(ava1), str)
def test_cmp(self):
# Equality
ava1 = AVA(self.attr1, self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
self.assertTrue(ava1 == self.str_ava1)
self.assertFalse(ava1 != self.str_ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Upper case attr should still be equal
ava1 = AVA(self.attr1.upper(), self.value1)
self.assertFalse(ava1.attr == self.attr1)
self.assertTrue(ava1.value == self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Upper case value should still be equal
ava1 = AVA(self.attr1, self.value1.upper())
self.assertTrue(ava1.attr == self.attr1)
self.assertFalse(ava1.value == self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Make ava1's attr greater
with self.assertRaises(AttributeError):
ava1.attr = self.attr1 + "1"
ava1 = AVA(self.attr1 + "1", self.value1.upper())
self.assertFalse(ava1 == self.ava1)
self.assertTrue(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 1)
result = cmp(self.ava1, ava1)
self.assertEqual(result, -1)
# Reset ava1's attr, should be equal again
with self.assertRaises(AttributeError):
ava1.attr = self.attr1
ava1 = AVA(self.attr1, self.value1.upper())
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Make ava1's value greater
        # attr will be equal; this tests the secondary comparison component
with self.assertRaises(AttributeError):
ava1.value = self.value1 + "1"
ava1 = AVA(self.attr1, self.value1 + "1")
result = cmp(ava1, self.ava1)
self.assertEqual(result, 1)
result = cmp(self.ava1, ava1)
self.assertEqual(result, -1)
def test_hashing(self):
# create AVA's that are equal but differ in case
ava1 = AVA((self.attr1.lower(), self.value1.upper()))
ava2 = AVA((self.attr1.upper(), self.value1.lower()))
# AVAs that are equal should hash to the same value.
self.assertEqual(ava1, ava2)
self.assertEqual(hash(ava1), hash(ava2))
# Different AVA objects with the same value should
# map to 1 common key and 1 member in a set. The key and
# member are based on the object's value.
ava1_a = AVA(self.ava1)
ava1_b = AVA(self.ava1)
ava2_a = AVA(self.ava2)
ava2_b = AVA(self.ava2)
ava3_a = AVA(self.ava3)
ava3_b = AVA(self.ava3)
self.assertEqual(ava1_a, ava1_b)
self.assertEqual(ava2_a, ava2_b)
self.assertEqual(ava3_a, ava3_b)
d = dict()
s = set()
d[ava1_a] = str(ava1_a)
d[ava1_b] = str(ava1_b)
d[ava2_a] = str(ava2_a)
d[ava2_b] = str(ava2_b)
s.add(ava1_a)
s.add(ava1_b)
s.add(ava2_a)
s.add(ava2_b)
self.assertEqual(len(d), 2)
self.assertEqual(len(s), 2)
self.assertEqual(sorted(d), sorted([ava1_a, ava2_a]))
self.assertEqual(sorted(s), sorted([ava1_a, ava2_a]))
self.assertTrue(ava1_a in d)
self.assertTrue(ava1_b in d)
self.assertTrue(ava2_a in d)
self.assertTrue(ava2_b in d)
self.assertFalse(ava3_a in d)
self.assertFalse(ava3_b in d)
self.assertTrue(ava1_a in s)
self.assertTrue(ava1_b in s)
self.assertTrue(ava2_a in s)
self.assertTrue(ava2_b in s)
self.assertFalse(ava3_a in s)
self.assertFalse(ava3_b in s)
class TestRDN(unittest.TestCase):
def setUp(self):
# ava1 must sort before ava2
self.attr1 = 'cn'
self.value1 = 'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
self.rdn1 = RDN((self.attr1, self.value1))
self.attr2 = 'ou'
self.value2 = 'people'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
self.rdn2 = RDN((self.attr2, self.value2))
self.str_ava3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
self.str_rdn3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
self.rdn3 = RDN(self.ava1, self.ava2)
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with single attr,value pair
rdn1 = RDN((self.attr1, self.value1))
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with multiple attr,value pairs
rdn3 = RDN((self.attr1, self.value1), (self.attr2, self.value2))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple attr,value pairs passed as lists
rdn3 = RDN([self.attr1, self.value1], [self.attr2, self.value2])
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple attr,value pairs but reverse
# constructor parameter ordering. RDN canonical ordering
# should remain the same
rdn3 = RDN((self.attr2, self.value2), (self.attr1, self.value1))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with single AVA object
rdn1 = RDN(self.ava1)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with multiple AVA objects
rdn3 = RDN(self.ava1, self.ava2)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple AVA objects but reverse constructor
# parameter ordering. RDN canonical ordering should remain
# the same
rdn3 = RDN(self.ava2, self.ava1)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with single string with 1 AVA
rdn1 = RDN(self.str_rdn1)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with single string with 2 AVA's
rdn3 = RDN(self.str_rdn3)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
def test_properties(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(rdn1.attr, self.attr1)
self.assertIsInstance(rdn1.attr, unicode)
self.assertEqual(rdn1.value, self.value1)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn2.attr, self.attr2)
self.assertIsInstance(rdn2.attr, unicode)
self.assertEqual(rdn2.value, self.value2)
self.assertIsInstance(rdn2.value, unicode)
self.assertEqual(rdn3.attr, self.attr1)
self.assertIsInstance(rdn3.attr, unicode)
self.assertEqual(rdn3.value, self.value1)
self.assertIsInstance(rdn3.value, unicode)
def test_str(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(str(rdn1), self.str_rdn1)
self.assertIsInstance(str(rdn1), str)
self.assertEqual(str(rdn2), self.str_rdn2)
self.assertIsInstance(str(rdn2), str)
self.assertEqual(str(rdn3), self.str_rdn3)
self.assertIsInstance(str(rdn3), str)
def test_cmp(self):
# Equality
rdn1 = RDN((self.attr1, self.value1))
self.assertTrue(rdn1 == self.rdn1)
self.assertFalse(rdn1 != self.rdn1)
self.assertTrue(rdn1 == self.str_rdn1)
self.assertFalse(rdn1 != self.str_rdn1)
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 0)
# Make rdn1's attr greater
rdn1 = RDN((self.attr1 + "1", self.value1))
self.assertFalse(rdn1 == self.rdn1)
self.assertTrue(rdn1 != self.rdn1)
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 1)
result = cmp(self.rdn1, rdn1)
self.assertEqual(result, -1)
# Reset rdn1's attr, should be equal again
rdn1 = RDN((self.attr1, self.value1))
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 0)
# Make rdn1's value greater
        # attr will be equal; this tests the secondary comparison component
rdn1 = RDN((self.attr1, self.value1 + "1"))
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 1)
result = cmp(self.rdn1, rdn1)
self.assertEqual(result, -1)
# Make sure rdn's with more ava's are greater
result = cmp(self.rdn1, self.rdn3)
self.assertEqual(result, -1)
result = cmp(self.rdn3, self.rdn1)
self.assertEqual(result, 1)
def test_indexing(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(rdn1[0], self.ava1)
self.assertEqual(rdn1[self.ava1.attr], self.ava1.value)
with self.assertRaises(KeyError):
rdn1['foo'] # pylint: disable=pointless-statement
self.assertEqual(rdn2[0], self.ava2)
self.assertEqual(rdn2[self.ava2.attr], self.ava2.value)
with self.assertRaises(KeyError):
rdn2['foo'] # pylint: disable=pointless-statement
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[self.ava1.attr], self.ava1.value)
self.assertEqual(rdn3[1], self.ava2)
self.assertEqual(rdn3[self.ava2.attr], self.ava2.value)
with self.assertRaises(KeyError):
rdn3['foo'] # pylint: disable=pointless-statement
self.assertEqual(rdn1.attr, self.attr1)
self.assertEqual(rdn1.value, self.value1)
with self.assertRaises(TypeError):
rdn3[1.0] # pylint: disable=pointless-statement
# Slices
self.assertEqual(rdn3[0:1], [self.ava1])
self.assertEqual(rdn3[:], [self.ava1, self.ava2])
def test_assignments(self):
rdn = RDN((self.attr1, self.value1))
with self.assertRaises(TypeError):
# pylint: disable=unsupported-assignment-operation
rdn[0] = self.ava2
def test_iter(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1[:], [self.ava1])
for i, ava in enumerate(rdn1):
if i == 0:
self.assertEqual(ava, self.ava1)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn1)))
self.assertEqual(len(rdn2), 1)
self.assertEqual(rdn2[:], [self.ava2])
for i, ava in enumerate(rdn2):
if i == 0:
self.assertEqual(ava, self.ava2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn2)))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3[:], [self.ava1, self.ava2])
for i, ava in enumerate(rdn3):
if i == 0:
self.assertEqual(ava, self.ava1)
elif i == 1:
self.assertEqual(ava, self.ava2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn3)))
def test_concat(self):
rdn1 = RDN((self.attr1, self.value1))
rdn2 = RDN((self.attr2, self.value2))
        # in-place addition
rdn1 += rdn2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
rdn1 = RDN((self.attr1, self.value1))
rdn1 += self.ava2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
rdn1 = RDN((self.attr1, self.value1))
rdn1 += self.str_ava2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
# concatenation
rdn1 = RDN((self.attr1, self.value1))
rdn3 = rdn1 + rdn2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
rdn3 = rdn1 + self.ava2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
rdn3 = rdn1 + self.str_ava2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
def test_hashing(self):
# create RDN's that are equal but differ in case
rdn1 = RDN((self.attr1.lower(), self.value1.upper()))
rdn2 = RDN((self.attr1.upper(), self.value1.lower()))
# RDNs that are equal should hash to the same value.
self.assertEqual(rdn1, rdn2)
self.assertEqual(hash(rdn1), hash(rdn2))
class TestDN(unittest.TestCase):
def setUp(self):
# ava1 must sort before ava2
self.attr1 = 'cn'
self.value1 = u'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
self.rdn1 = RDN((self.attr1, self.value1))
self.attr2 = 'ou'
self.value2 = u'people'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
self.rdn2 = RDN((self.attr2, self.value2))
self.str_dn1 = self.str_rdn1
self.dn1 = DN(self.rdn1)
self.str_dn2 = self.str_rdn2
self.dn2 = DN(self.rdn2)
self.str_dn3 = '%s,%s' % (self.str_rdn1, self.str_rdn2)
self.dn3 = DN(self.rdn1, self.rdn2)
self.base_rdn1 = RDN(('dc', 'redhat'))
self.base_rdn2 = RDN(('dc', 'com'))
self.base_dn = DN(self.base_rdn1, self.base_rdn2)
self.container_rdn1 = RDN(('cn', 'sudorules'))
self.container_rdn2 = RDN(('cn', 'sudo'))
self.container_dn = DN(self.container_rdn1, self.container_rdn2)
self.base_container_dn = DN((self.attr1, self.value1),
self.container_dn, self.base_dn)
self.x500name = x509.Name([
x509.NameAttribute(
x509.NameOID.ORGANIZATIONAL_UNIT_NAME, self.value2),
x509.NameAttribute(x509.NameOID.COMMON_NAME, self.value1),
])
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with single attr,value pair
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single attr,value pair passed as a tuple
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Creation with multiple attr,value string pairs should fail
with self.assertRaises(ValueError):
dn1 = DN(self.attr1, self.value1, self.attr2, self.value2)
# Create with multiple attr,value pairs passed as tuples & lists
dn1 = DN((self.attr1, self.value1), [self.attr2, self.value2])
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs passed as tuple and RDN
dn1 = DN((self.attr1, self.value1), RDN((self.attr2, self.value2)))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs but reverse
# constructor parameter ordering. RDN ordering should also be
        # reversed because DN's are an ordered sequence of RDN's
dn1 = DN((self.attr2, self.value2), (self.attr1, self.value1))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single RDN object
dn1 = DN(self.rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with multiple RDN objects, assure ordering is preserved.
dn1 = DN(self.rdn1, self.rdn2)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple RDN objects in different order, assure
# ordering is preserved.
dn1 = DN(self.rdn2, self.rdn1)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single string with 1 RDN
dn1 = DN(self.str_rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single string with 2 RDN's
dn1 = DN(self.str_dn3)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with a python-cryptography 'Name'
dn1 = DN(self.x500name)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with RDN, and 2 DN's (e.g. attr + container + base)
dn1 = DN((self.attr1, self.value1), self.container_dn, self.base_dn)
self.assertEqual(len(dn1), 5)
dn_str = ','.join([str(self.rdn1),
str(self.container_rdn1), str(self.container_rdn2),
str(self.base_rdn1), str(self.base_rdn2)])
self.assertEqual(str(dn1), dn_str)
def test_str(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(str(dn1), self.str_dn1)
self.assertIsInstance(str(dn1), str)
self.assertEqual(str(dn2), self.str_dn2)
self.assertIsInstance(str(dn2), str)
self.assertEqual(str(dn3), self.str_dn3)
self.assertIsInstance(str(dn3), str)
def test_cmp(self):
# Equality
dn1 = DN((self.attr1, self.value1))
self.assertTrue(dn1 == self.dn1)
self.assertFalse(dn1 != self.dn1)
self.assertTrue(dn1 == self.str_dn1)
self.assertFalse(dn1 != self.str_dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's attr greater
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1 + "1"
dn1 = DN((self.attr1 + "1", self.value1))
self.assertFalse(dn1 == self.dn1)
self.assertTrue(dn1 != self.dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Reset dn1's attr, should be equal again
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1
dn1 = DN((self.attr1, self.value1))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's value greater
        # attr will be equal; this tests the secondary comparison component
with self.assertRaises(AttributeError):
dn1[0].value = self.value1 + "1"
dn1 = DN((self.attr1, self.value1 + "1"))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Make sure dn's with more rdn's are greater
result = cmp(self.dn1, self.dn3)
self.assertEqual(result, -1)
result = cmp(self.dn3, self.dn1)
self.assertEqual(result, 1)
# Test startswith, endswith
container_dn = DN(self.container_dn)
base_container_dn = DN(self.base_container_dn)
self.assertTrue(base_container_dn.startswith(self.rdn1))
self.assertTrue(base_container_dn.startswith(self.dn1))
self.assertTrue(base_container_dn.startswith(self.dn1 + container_dn))
self.assertFalse(base_container_dn.startswith(self.dn2))
self.assertFalse(base_container_dn.startswith(self.rdn2))
self.assertTrue(base_container_dn.startswith((self.dn1)))
self.assertTrue(base_container_dn.startswith((self.rdn1)))
self.assertFalse(base_container_dn.startswith((self.rdn2)))
self.assertTrue(base_container_dn.startswith((self.rdn2, self.rdn1)))
self.assertTrue(base_container_dn.startswith((self.dn1, self.dn2)))
self.assertTrue(base_container_dn.endswith(self.base_dn))
self.assertTrue(base_container_dn.endswith(container_dn + self.base_dn))
self.assertFalse(base_container_dn.endswith(DN(self.base_rdn1)))
self.assertTrue(base_container_dn.endswith(DN(self.base_rdn2)))
self.assertTrue(base_container_dn.endswith((DN(self.base_rdn1), DN(self.base_rdn2))))
# Test "in" membership
self.assertTrue(self.container_rdn1 in container_dn)
self.assertTrue(container_dn in container_dn)
self.assertFalse(self.base_rdn1 in container_dn)
self.assertTrue(self.container_rdn1 in base_container_dn)
self.assertTrue(container_dn in base_container_dn)
self.assertTrue(container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn ==
base_container_dn)
self.assertFalse(self.container_rdn1 in self.base_dn)
def test_indexing(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[self.rdn1.attr], self.rdn1.value)
with self.assertRaises(KeyError):
dn1['foo'] # pylint: disable=pointless-statement
self.assertEqual(dn2[0], self.rdn2)
self.assertEqual(dn2[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn2['foo'] # pylint: disable=pointless-statement
self.assertEqual(dn3[0], self.rdn1)
self.assertEqual(dn3[self.rdn1.attr], self.rdn1.value)
self.assertEqual(dn3[1], self.rdn2)
self.assertEqual(dn3[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn3['foo'] # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
dn3[1.0] # pylint: disable=pointless-statement
def test_assignments(self):
dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(TypeError):
# pylint: disable=unsupported-assignment-operation
dn[0] = RDN('t=a')
with self.assertRaises(TypeError):
# pylint: disable=unsupported-assignment-operation
dn[0:1] = [RDN('t=a'), RDN('t=b')]
def test_iter(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(len(dn1), 1)
self.assertEqual(dn1[:], self.rdn1)
for i, ava in enumerate(dn1):
if i == 0:
self.assertEqual(ava, self.rdn1)
else:
                self.fail("got iteration index %d, but len=%d" % (i, len(dn1)))
self.assertEqual(len(dn2), 1)
self.assertEqual(dn2[:], self.rdn2)
for i, ava in enumerate(dn2):
if i == 0:
self.assertEqual(ava, self.rdn2)
else:
                self.fail("got iteration index %d, but len=%d" % (i, len(dn2)))
self.assertEqual(len(dn3), 2)
self.assertEqual(dn3[:], DN(self.rdn1, self.rdn2))
for i, ava in enumerate(dn3):
if i == 0:
self.assertEqual(ava, self.rdn1)
elif i == 1:
self.assertEqual(ava, self.rdn2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(dn3)))
def test_concat(self):
dn1 = DN((self.attr1, self.value1))
dn2 = DN([self.attr2, self.value2])
        # in-place addition
dn1 += dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.rdn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.str_dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
# concatenation
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + self.rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.str_rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
self.assertExpectedClass(DN, dn3[i][0], 'AVA')
dn3 = dn1 + self.str_dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
def test_find(self):
# -10 -9 -8 -7 -6 -5 -4 -3 -2 -1
dn = DN('t=0,t=1,cn=bob,t=3,t=4,t=5,cn=bob,t=7,t=8,t=9')
pat = DN('cn=bob')
# forward
self.assertEqual(dn.find(pat), 2)
self.assertEqual(dn.find(pat, 1), 2)
self.assertEqual(dn.find(pat, 1, 3), 2)
self.assertEqual(dn.find(pat, 2, 3), 2)
self.assertEqual(dn.find(pat, 6), 6)
self.assertEqual(dn.find(pat, 7), -1)
self.assertEqual(dn.find(pat, 1, 2), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 1, 2), -1)
# reverse
self.assertEqual(dn.rfind(pat), 6)
self.assertEqual(dn.rfind(pat, -4), 6)
self.assertEqual(dn.rfind(pat, 6), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, -8), 6)
self.assertEqual(dn.rfind(pat, -8, -4), 6)
self.assertEqual(dn.rfind(pat, -8, -5), 2)
self.assertEqual(dn.rfind(pat, 7), -1)
self.assertEqual(dn.rfind(pat, -3), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, -3), -1)
def test_replace(self):
# pylint: disable=no-member
dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(AttributeError):
dn.replace # pylint: disable=pointless-statement
def test_hashing(self):
# create DN's that are equal but differ in case
dn1 = DN((self.attr1.lower(), self.value1.upper()))
dn2 = DN((self.attr1.upper(), self.value1.lower()))
# DNs that are equal should hash to the same value.
self.assertEqual(dn1, dn2)
# Good, everyone's equal, now verify their hash values
self.assertEqual(hash(dn1), hash(dn2))
# Different DN objects with the same value should
# map to 1 common key and 1 member in a set. The key and
# member are based on the object's value.
dn1_a = DN(self.dn1)
dn1_b = DN(self.dn1)
dn2_a = DN(self.dn2)
dn2_b = DN(self.dn2)
dn3_a = DN(self.dn3)
dn3_b = DN(self.dn3)
self.assertEqual(dn1_a, dn1_b)
self.assertEqual(dn2_a, dn2_b)
self.assertEqual(dn3_a, dn3_b)
d = dict()
s = set()
d[dn1_a] = str(dn1_a)
d[dn1_b] = str(dn1_b)
d[dn2_a] = str(dn2_a)
d[dn2_b] = str(dn2_b)
s.add(dn1_a)
s.add(dn1_b)
s.add(dn2_a)
s.add(dn2_b)
self.assertEqual(len(d), 2)
self.assertEqual(len(s), 2)
self.assertEqual(sorted(d), sorted([dn1_a, dn2_a]))
self.assertEqual(sorted(s), sorted([dn1_a, dn2_a]))
self.assertTrue(dn1_a in d)
self.assertTrue(dn1_b in d)
self.assertTrue(dn2_a in d)
self.assertTrue(dn2_b in d)
self.assertFalse(dn3_a in d)
self.assertFalse(dn3_b in d)
self.assertTrue(dn1_a in s)
self.assertTrue(dn1_b in s)
self.assertTrue(dn2_a in s)
self.assertTrue(dn2_b in s)
self.assertFalse(dn3_a in s)
self.assertFalse(dn3_b in s)
def test_x500_text(self):
# null DN x500 ordering and LDAP ordering are the same
nulldn = DN()
self.assertEqual(nulldn.ldap_text(), nulldn.x500_text())
# reverse a DN with a single RDN
self.assertEqual(self.dn1.ldap_text(), self.dn1.x500_text())
# reverse a DN with 2 RDNs
dn3_x500 = self.dn3.x500_text()
dn3_rev = DN(self.rdn2, self.rdn1)
self.assertEqual(dn3_rev.ldap_text(), dn3_x500)
# reverse a longer DN
longdn_x500 = self.base_container_dn.x500_text()
longdn_rev = DN(longdn_x500)
l = len(self.base_container_dn)
for i in range(l):
self.assertEqual(longdn_rev[i], self.base_container_dn[l-1-i])
class TestEscapes(unittest.TestCase):
def setUp(self):
self.privilege = 'R,W privilege'
self.dn_str_hex_escape = 'cn=R\\2cW privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
self.dn_str_backslash_escape = 'cn=R\\,W privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
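        # '\\2c' is the hexadecimal escape for ',' (0x2C); both strings above
        # denote the same DN, whose leading 'cn' value is 'R,W privilege'.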
def test_escape(self):
dn = DN(self.dn_str_hex_escape)
self.assertEqual(dn['cn'], self.privilege)
self.assertEqual(dn[0].value, self.privilege)
dn = DN(self.dn_str_backslash_escape)
self.assertEqual(dn['cn'], self.privilege)
self.assertEqual(dn[0].value, self.privilege)
class TestInternationalization(unittest.TestCase):
def setUp(self):
# Hello in Arabic
self.arabic_hello_utf8 = (b'\xd9\x85\xd9\x83\xd9\x8a\xd9\x84' +
b'\xd8\xb9\x20\xd9\x85\xd8\xa7\xd9' +
b'\x84\xd9\x91\xd8\xb3\xd9\x84\xd8\xa7')
self.arabic_hello_unicode = self.arabic_hello_utf8.decode('utf-8')
def assert_equal_utf8(self, obj, b):
if six.PY2:
self.assertEqual(str(obj), b)
else:
self.assertEqual(str(obj), b.decode('utf-8'))
@contextlib.contextmanager
def fail_py3(self, exception_type):
try:
yield
except exception_type:
if six.PY2:
raise
def test_i18n(self):
self.assertEqual(self.arabic_hello_utf8,
self.arabic_hello_unicode.encode('utf-8'))
# AVA's
# test attr i18n
ava1 = AVA(self.arabic_hello_unicode, 'foo')
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.attr, self.arabic_hello_unicode)
self.assert_equal_utf8(ava1, self.arabic_hello_utf8 + b'=foo')
with self.fail_py3(TypeError):
ava1 = AVA(self.arabic_hello_utf8, 'foo')
if six.PY2:
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.attr, self.arabic_hello_unicode)
self.assert_equal_utf8(ava1, self.arabic_hello_utf8 + b'=foo')
# test value i18n
ava1 = AVA('cn', self.arabic_hello_unicode)
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.value, self.arabic_hello_unicode)
self.assert_equal_utf8(ava1, b'cn=' + self.arabic_hello_utf8)
with self.fail_py3(TypeError):
ava1 = AVA('cn', self.arabic_hello_utf8)
if six.PY2:
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.value, self.arabic_hello_unicode)
self.assert_equal_utf8(ava1, b'cn=' + self.arabic_hello_utf8)
# RDN's
# test attr i18n
rdn1 = RDN((self.arabic_hello_unicode, 'foo'))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
self.assert_equal_utf8(rdn1, self.arabic_hello_utf8 + b'=foo')
with self.fail_py3(TypeError):
rdn1 = RDN((self.arabic_hello_utf8, 'foo'))
if six.PY2:
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), self.arabic_hello_utf8 + b'=foo')
# test value i18n
rdn1 = RDN(('cn', self.arabic_hello_unicode))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.value, self.arabic_hello_unicode)
self.assert_equal_utf8(rdn1, b'cn=' + self.arabic_hello_utf8)
with self.fail_py3(TypeError):
rdn1 = RDN(('cn', self.arabic_hello_utf8))
if six.PY2:
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.value, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), b'cn=' + self.arabic_hello_utf8)
# DN's
# test attr i18n
dn1 = DN((self.arabic_hello_unicode, 'foo'))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
self.assert_equal_utf8(dn1, self.arabic_hello_utf8 + b'=foo')
with self.fail_py3(TypeError):
dn1 = DN((self.arabic_hello_utf8, 'foo'))
if six.PY2:
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
self.assertEqual(str(dn1), self.arabic_hello_utf8 + b'=foo')
# test value i18n
dn1 = DN(('cn', self.arabic_hello_unicode))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
self.assert_equal_utf8(dn1, b'cn=' + self.arabic_hello_utf8)
with self.fail_py3(TypeError):
dn1 = DN(('cn', self.arabic_hello_utf8))
if six.PY2:
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
self.assertEqual(str(dn1), b'cn=' + self.arabic_hello_utf8)
if __name__ == '__main__':
unittest.main()
| apophys/freeipa | ipatests/test_ipapython/test_dn.py | Python | gpl-3.0 | 48,955 |
import numpy as np
class Comparison(object):
def get_num_features(self,shp):
return shp[1] * shp[2] * shp[3]
class Concatenate(Comparison):
def get_num_features(self, shp):
return 2 * shp[1] * shp[2] * shp[3]
def __call__(self, x, y):
return np.concatenate([x.flatten(),y.flatten()])
concatenate = Concatenate()
class Mult(Comparison):
def __call__(self, x, y):
return x.flatten() * y.flatten()
mult = Mult()
class Diff(Comparison):
def __call__(self, x, y):
return x.flatten() - y.flatten()
diff = Diff()
class Absdiff(Comparison):
def __call__(self, x, y):
return np.abs(x.flatten() - y.flatten())
absdiff = Absdiff()
class Sqrtabsdiff(Comparison):
def __call__(self, x, y):
return np.sqrt(np.abs(x.flatten() - y.flatten()))
sqrtabsdiff = Sqrtabsdiff()
class Sqdiff(Comparison):
def __call__(self, x, y):
return (x.flatten() - y.flatten())**2
sqdiff = Sqdiff()
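# A minimal usage sketch (illustrative shapes only): each Comparison flattens
# a pair of 4-D feature batches into a single 1-D vector, and
# get_num_features() reports the resulting length for a given input shape.
if __name__ == '__main__':
    x = np.ones((1, 2, 3, 4))
    y = np.zeros((1, 2, 3, 4))
    assert mult.get_num_features(x.shape) == 2 * 3 * 4      # element-wise: 24
    assert concatenate.get_num_features(x.shape) == 2 * 24  # doubled: 48
    assert mult(x, y).shape == (24,)
    assert concatenate(x, y).shape == (48,)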
| yamins81/thor_model_exploration | thor_model_exploration/comparisons.py | Python | mit | 982 |
from . import res_partner
from . import account_move
| avanzosc/odoo-addons | account_invoice_report_grouped_by_event/models/__init__.py | Python | agpl-3.0 | 53 |
# -*- coding: utf-8 -*-
# @author: vuolter
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
from future import standard_library
standard_library.install_aliases()
def char(text, chars, repl=''):
return re.sub(r'[{0}]+'.format(chars), repl, text)
_UNIXBADCHARS = ('\0', '/', '\\')
_MACBADCHARS = _UNIXBADCHARS + (':',)
_WINBADCHARS = _MACBADCHARS + ('<', '>', '"', '|', '?', '*')
_WINBADWORDS = (
'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
'con', 'prn')
def name(text, sep='_', allow_whitespaces=False):
"""Remove invalid characters."""
if os.name == 'nt':
bc = _WINBADCHARS
elif sys.platform == 'darwin':
bc = _MACBADCHARS
else:
bc = _UNIXBADCHARS
repl = r''.join(bc)
if not allow_whitespaces:
repl += ' '
res = char(text, repl, sep).strip()
if os.name == 'nt' and res.lower() in _WINBADWORDS:
res = sep + res
return res
def pattern(text, rules):
for rule in rules:
try:
pattr, repl, flags = rule
except ValueError:
pattr, repl = rule
flags = 0
text = re.sub(pattr, repl, text, flags)
return text
def truncate(text, offset):
maxtrunc = len(text) // 2
if offset > maxtrunc:
raise ValueError('String too short to truncate')
trunc = (len(text) - offset) // 3
return '{0}~{1}'.format(text[:trunc * 2], text[-trunc:])
def uniquify(seq):
"""Remove duplicates from list preserving order."""
seen = set()
seen_add = seen.add
return type(seq)(x for x in seq if x not in seen and not seen_add(x))
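# Illustrative behaviour of the helpers above (results shown for a POSIX
# host; on Windows the bad-character set is larger and reserved device
# names such as 'con' get a separator prefix):
if __name__ == '__main__':
    assert char('a--b', '-') == 'ab'
    assert uniquify([3, 1, 3, 2, 1]) == [3, 1, 2]
    assert truncate('abcdefghij', 2) == 'abcd~ij'
    print(name('foo/bar baz'))  # -> 'foo_bar_baz' on Linux/macOS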
| pyblub/pyload | pyload/utils/purge.py | Python | agpl-3.0 | 1,743 |
# Copyright 2008, Patrick C. McGinty
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module for mktoc that provides object(s) to parse text files
describing the layout of an audio CD. After the parse step is
complete, it is possible to access the data or convert into any other
output format. The following object classes are:
ParseData
   Stores parsed CD-ROM data and provides methods for modification
and access. Automatically generated by invoking the 'parse'
method on one of the Parser classes defined below.
CueParser
An audio CUE sheet text file parsing class. After parsing, the
CUE file can be re-created or converted into a new format.
WavParser
A simplified WAV file parsing class. Using an in-order list of
WAV files, the class can return a simple CUE file output.
"""
__date__ = '$Date$'
__version__ = '$Revision$'
import os
import re
import logging
import itertools as itr
import operator as op
from mktoc.base import *
from mktoc import disc as mt_disc
from mktoc import wav as mt_wav
from mktoc import progress_bar as mt_pb
__all__ = ['CueParser','WavParser']
log = logging.getLogger('mktoc.parser')
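# Typical use of this module (a sketch; 'album.cue' is an assumed input file,
# and find_wav=False skips verifying that the referenced WAV files exist):
#
#   from mktoc.parser import CueParser
#   with open('album.cue') as fh:
#      data = CueParser(find_wav=False).parse(fh)
#   print '\n'.join(data.getToc())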
##############################################################################
class ParseData(object):
"""Stores parsed CD-ROM data and provides methods for modifcation
and access. Automatically generated by invoking the 'parse' method
defined in one of the Parser classes.
Private Members:
_disc
Disc object that stores global disc info.
_tracks
Track object that stores track info.
_files
In-order list of WAV files that apply to the CD audio.
"""
def __init__(self, disc, tracks, files):
"""Initialize data structures. The input data is post processed
by call the 'mung' method on each object.
Parameters:
disc : disc.Disc() object with CD info
tracks : a list of disc.Track() objects with track info and
indexes for each portion of the track.
files : in-order list of WAV files associated with 'tracks'
"""
self._disc = disc
self._tracks = tracks
self._files = files
# modify data to workable formats
self._disc.mung()
# current track and "next" track or None
map(lambda t1,t2: t1.mung(t2), # update values in each track
self._tracks, itr.islice(self._tracks, 1, None))
def getToc(self):
"""Access method to return a text stream of the CUE data in TOC
format."""
toc = []
toc.extend( str(self._disc).split('\n') )
for trk in self._tracks:
toc.extend( str(trk).split('\n') )
# expand tabs to 4 spaces, strip trailing white space on each line
toc = [line.expandtabs(4).rstrip() for line in toc]
return toc
def modWavOffset(self,samples,tmp=False):
"""Optional method to correct the audio WAV data by shifting the
samples by a positive or negative offset. This can be used to
compensate for a write offset in a CD/DVD burner. If the 'tmp'
variable is True, all new WAV files will be created in the /tmp
directory.
Parameters:
samples : the number of samples to shift the audio data by.
This value can be negative or positive.
tmp : True or False, when True any new WAV files will be
created in /tmp.
"""
# create WavOffset object, initialize sample offset and progress output
wo = mt_wav.WavOffsetWriter( samples, mt_pb.ProgressBar,
('processing WAV files:',))
new_files = wo.execute( self._files, tmp )
# change all index file names to newly generated files
file_map = dict( zip(self._files,new_files) )
      indexes = itr.imap(op.attrgetter('indexes'), self._tracks)
for idx in itr.chain(*indexes):
log.debug( "updating index file '%s'", idx.file_ )
idx.file_ = file_map[idx.file_]
##############################################################################
class _Parser(object):
"""A generic CD TOC parsing class. This class provides a foundation
of public and private methods to access and modify an audio CD
track listing.
Private Members:
_file_map
Dictionary to map input WAV files to actual files on the
system. The map is for use in cases where the defined file
name does not exactly match the file system WAV name.
_files
In-order list of WAV files that apply to the CD audio.
_find_wav
True or False, when True the WAV file must be found in the FS
or an exception is raised.
_wav_file_cache
WavFileCache object that can quickly find WAV files in the
local file system.
"""
def __init__(self, work_dir=os.curdir, find_wav=True):
"""Parses CUE file text data and initializes object data. The
primary output of this function is to create the '_disc' and
'_tracks' objects. All of the processed CUE data is stored in
these two structures.
Parameters:
find_wav : True/False, True causes exceptions to be raised
if a WAV file can not be found in the FS.
work_dir : Path location of the working directory.
"""
# init class options
self._file_map = {}
self._files = []
self._find_wav = find_wav
assert(work_dir)
self._wav_file_cache = mt_wav.WavFileCache(work_dir)
def parse( self, *args, **kwargs ):
"""Execute parsing steps on the input data. Must be implemented
by all subclasses. Returns a correctly formed ParseData
object."""
      raise NotImplementedError
def _lookup_file_name(self,file_):
"""Attempts to return the path to a valid WAV file in the files
system using the input 'file_' value. If the WAV file can not be
found and '_find_wav' is True, then an exception is raised.
Parameter:
file : audio file name parsed from the CUE text.
"""
if file_ in self._file_map:
return self._file_map[file_]
else:
try: # attempt to find the WAV file
file_on_disk = self._wav_file_cache.lookup(file_)
except FileNotFoundError:
            # file was not found, but '_find_wav' indicates whether the
            # file must exist
if self._find_wav: raise
else: file_on_disk = file_
self._file_map[file_] = file_on_disk
return file_on_disk
##############################################################################
class CueParser(_Parser):
"""An audio CUE sheet text file parsing class. By matching the
known format of a CUE file, the relevant text information is
extracted and converted to a binary representation. The binary
representation is created by using combination of Disc, Track, and
TrackIndex objects. With the data, the CUE file can be re-created
or converted into a new format.
Constants:
The following constants contain all of the pattern matching
expressions for the CUE file parsing steps. The patterns are
combined and applied depending on the current step of the
scanning process.
_FILE_REGEX
Regex patterns for WAV file names.
_TRACK_REGEX
Regex patterns for Track commands.
_DISC_REGEX
Regex patterns for Disc info.
_TINFO_REGEX
Regex patterns for associated Track info.
Private Members:
_cue
List of processed CUE text data. The processing step removes
text comments and strips white spaces.
_file_lines
List of CUE file line numbers and WAV files tuple pairs for
each WAV file in the CUE.
_track_lines
List used as a lookup table, indexed by track number, to map
each CUE track to its line number in the CUE text.
_part_search
RegexStore list of regex searches for first partial scan of
the TOC text.
_disc_search
RegexStore list of regex searches for disc info scan of the
TOC.
_tinfo_search
RegexStore list of regex searches for track info scan of the
TOC.
"""
# file name search pattern used in all other searches
_FILE_REGEX = [
('file', r"""
^\s*FILE # FILE
\s+"(.*)" # 'file name' in quotes
\s+WAVE$ # WAVE
""" )]
# create search patterns for lookup table parsing
_TRACK_REGEX = [
('track', r"""
^\s*TRACK # TRACK
\s+(\d+) # track 'number'
\s+(AUDIO|MODE.*)$ # AUDIO or MODEx/xxxx
""")]
# create search patterns for disc parsing
_DISC_REGEX = [
('rem' , r"""
^\s*REM # match 'REM'
\s+(\w+) # match 'key'
\s+(.*)$ # match 'value'
"""),
('quote', r"""
^\s*(\w+) # match 'key'
\s+"(.*)"$ # match 'value' surrounded with double quotes
"""),
('catalog', r"""
^\s*(CATALOG) # CATALOG
\s+(\d{13})$ # catalog 'value'
""")]
# create search patterns for track parsing
_TINFO_REGEX = [
('index', r"""
^\s*INDEX # INDEX
\s+(\d+) # 'index number'
\s+(\d{2}:\d{2}:\d{2})$ # 'index time'
"""),
('quote', r"""
^\s*(PERFORMER|TITLE) # 'key'
\s+"(.*)"$ # 'value' surrounded with double quotes
"""),
('named', r"""
^\s*(ISRC|PREGAP) # a known CUE command
\s+(.*)$ # single arg
"""),
('flag', r"""
^\s*FLAGS # a FLAG command
\s+(.*)$ # one or more flags
""")]
def __init__(self, cue_dir=os.curdir, find_wav=True):
"""Initializes object data.
Parameters:
cue_dir : Path location of the CUE file's directory.
find_wav : True/False, True causes exceptions to be raised
if a WAV file can not be found in the FS.
"""
assert(cue_dir)
super(CueParser,self).__init__(cue_dir, find_wav)
self._part_search = _RegexStore( dict(self._FILE_REGEX + \
self._TRACK_REGEX) )
self._disc_search = _RegexStore( dict(self._FILE_REGEX + \
self._DISC_REGEX) )
self._tinfo_search = _RegexStore( dict(self._FILE_REGEX + \
self._TINFO_REGEX + self._TRACK_REGEX))
def parse(self, fh):
"""Parses CUE file text data and return a ParseData object. The
primary output of this function is to initialize the 'disc' and
'tracks' member variables of ParseData. All of the processed
CUE data is stored in these two structures.
Parameters:
fh : An open file handle used to read the CUE text
data
"""
# create a list of regular expressions before starting the parse
rem_regex = re.compile( r'^\s*REM\s+COMMENT' )
# parse disc into memory, ignore comments
self._cue = [line.strip() for line in fh if not rem_regex.search(line)]
if not len(self._cue):
raise EmptyCueData
self._build_lookup_tbl()
# create data objects for CUE info
disc = self._parse_disc()
return ParseData( disc, self._parse_all_tracks(disc), self._files )
def _active_file(self,trk_idx):
"""Returns the previous WAV file used before the start of
'trk_idx'."""
tline = self._track_lines[trk_idx] # line number track begins at
# return the first wav file found that is at a lower line than 'tline'
return itr.ifilter(lambda (x,y): x < tline,
reversed(self._file_lines)).next()[1]
def _build_lookup_tbl(self):
"""Helper function to create the '_files', '_file_lines' and
'_track_lines' lists structures required before the class
initialization is complete."""
# return an iterator of tuples with line nums, re.match name, and
# re.match data
matchi = itr.chain(*itr.imap( self._part_search.match, self._cue ))
num_matchi = itr.izip( itr.count(), matchi, matchi )
# create list of valid matches
matches = filter(op.itemgetter(2), num_matchi)
# iterator of 'file' matches
files = filter(lambda (i,key,match): key == 'file', matches)
# create a list of 'wav file name'
self._files = map( lambda m: self._lookup_file_name(m.group(1)),
itr.imap(op.itemgetter(2),files) )
# create a tuple of (i,wav file name)
self._file_lines = zip( itr.imap(op.itemgetter(0),files), self._files )
# iterator of 'track' matches
tracks = itr.ifilter( lambda (i,key,match): key == 'track', matches)
self._track_lines = map(op.itemgetter(0), tracks)
def _parse_all_tracks(self,disc):
"""Return a list of Track objects that contain the track
information from the fully parsed CUE text data."""
return list( itr.imap( self._parse_track, range(len(self._track_lines)),
itr.repeat(disc)) )
def _parse_disc(self):
"""Return a Disc object that contains the disc information from
the fully parsed CUE text data. This method implements the
'disc' scanning steps of the parser."""
disc_ = mt_disc.Disc()
# splice disc data from the cue list, and return an iterator of tuples
# returned by re.match
cue_data = map( self._disc_search.match,
itr.islice(self._cue, 0, self._track_lines[0]) )
# raise error if an unknown (unmatched) pattern is found
if filter( lambda (key,match): not match, cue_data):
raise ParseError, "Unmatched pattern in disc data stream"
# ignore 'file' matches
for key,value in \
[match.groups() for key,match in cue_data if key != 'file']:
key = key.lower()
if hasattr(disc_,key):
# add match value to Disc object
setattr(disc_, key, value.strip())
else:
raise ParseError, "Unmatched keyword in stream: '%s'" % key
return disc_
def _parse_track(self, num, disc):
"""Return a Track object that contains a single track element
from the parsed CUE text data. This method implements the
'track' scanning steps of the parser.
Parameters:
num : the track index of the track to parse. The first
track starts at 0.
"""
# splice track data
if num+1 < len(self._track_lines):
data = itr.islice(self._cue, self._track_lines[num],
self._track_lines[num+1])
else:
data = itr.islice(self._cue, self._track_lines[num], None)
# lookup the previous file name
file_name = self._active_file(num)
# <-- This is the main track parsing step --->
trk = mt_disc.Track(num+1)
# Every CUE file has a list of FILE, TRACK, and INDEX commands. The FILE
# commands specify the active FILE for the following INDEX commands. The
# TRACK commands indicate the logical beginning of a new TRACK info list
# with TITLE and PERFORMER tags.
cue_data = map( self._tinfo_search.match, data )
# raise error if an unknown (unmatched) pattern is found
if filter( lambda (key,match): not match, cue_data):
raise ParseError, "Unmatched pattern in track data stream"
for re_key,match in cue_data:
if re_key == 'track':
assert trk.num == int(match.group(1))
if match.group(2) != 'AUDIO':
trk.is_data = True
disc.setMultisession() # disc is multi-session
elif re_key == 'file':
# update file name
file_name = self._lookup_file_name(match.group(1))
elif re_key == 'index':
# track INDEX, file_name is associated with the index
idx_num,time = match.groups()
i = mt_disc.TrackIndex( idx_num, time, file_name )
trk.appendIdx( i )
elif re_key in ['quote','named']:
# track information (PERFORMER, TITLE, ...)
key,value = match.groups()
key = key.lower()
if hasattr(trk,key): # add match value to Track object
setattr(trk, key, value.strip())
else:
raise ParseError, "Unmatched keyword in stream: '%s'" % key
elif re_key == 'flag':
for f in itr.ifilter( lambda x: x in ['DCP','4CH','PRE'],
match.group(1).split() ):
if f == '4CH': f = 'four_ch' # change '4CH' flag name
setattr(trk, f.lower(), True)
else: # catch unhandled patterns
raise ParseError, "Unmatched pattern in stream: '%s'" % re_key
return trk
##############################################################################
class WavParser(_Parser):
"""A simple parser object that uses a list of WAV files to create a
CD TOC. The class assumes that each WAV file is an individual
track, in ascending order."""
def __init__(self, work_dir=os.curdir, find_wav=True):
"""Initialize the parser. The primary output of this function is
to create the '_disc' and '_tracks' objects.
Parameters:
work_dir : Path location of the CUE file's directory.
find_wav : True/False, True causes exceptions to be raised
if a WAV file can not be found in the FS.
"""
# init class options
assert( work_dir )
super(WavParser,self).__init__(work_dir, find_wav)
def parse( self, wav_files):
"""Parses a list of WAV files and return a ParseData object. The
primary output of this function is to initialize the 'disc' and
'tracks' member variables of ParseData. All of the processed
CUE data is stored in these two structures.
Parameters:
wav_files : A list of WAV files to add to the TOC
"""
self._files = map(self._lookup_file_name, wav_files)
# return a new Track object with a single Index using 'file_'
def mk_track((idx,file_)):
# create a new track for the WAV file
trk = mt_disc.Track(idx+1)
# add the WAV file to the first index in the track
trk.appendIdx( mt_disc.TrackIndex(1,0,file_) )
return trk
# return a new ParseData object with an empty Disc and a complete Track list
return ParseData( mt_disc.Disc(),
map( mk_track, enumerate(self._files)),
self._files )
##############################################################################
class _RegexStore(object):
"""A helper class that simplifies the management of regular
expressions. The RegexStore class is used to apply a list of
regular expressions to a single text stream. The first matching
regular expression is returned.
Private Members:
_searches
Dictionary of compiled regex's keyed by a user supplied
string value.
"""
def __init__(self, pat_dict):
"""Initialize the '_searches' dictionary using the 'pat_dict'
parameter.
Parameters:
pat_dict : A dictionary of regular expression strings. The
regex value is compiled and stored in the
'_searches' dictionary, keyed by the original
'pat_dict' key.
"""
# build RegEx searches
re_searches = [re.compile(pat, re.VERBOSE) for pat in pat_dict.values()]
self._searches = dict(zip(pat_dict.keys(),re_searches))
def match( self, text ):
"""Applies the 'text' parameter to a dictionary of regex
searches. The output of the first matching regex is returned
along with the matching regex's dictionary key. The returned
data is contained in a tuple, with the key as the first element.
Parameters:
text : text string applied to a list of regex searches.
"""
match_all = itr.starmap( lambda key,cre: (key,cre.search(text)),
self._searches.items() )
try:
return itr.ifilter(op.itemgetter(1), match_all).next()
except StopIteration, e:
return ('',None)
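##############################################################################
# --- Illustrative sketch only (not part of mktoc): a minimal demonstration of
# the _RegexStore helper above. The one-pattern dict is a hypothetical subset
# of the CUE patterns defined earlier in this module.
if __name__ == '__main__':
    _store = _RegexStore({'quote': r'^\s*(\w+)\s+"(.*)"$'})
    _key, _match = _store.match('TITLE "Example Album"')
    assert (_key, _match.group(2)) == ('quote', 'Example Album')
    # input that matches no pattern falls through to the ('', None) sentinel
    assert _store.match('UNKNOWN LINE') == ('', None)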
|
epronk/mktoc
|
src/mktoc/parser.py
|
Python
|
gpl-3.0
| 21,169
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
# Author: Thomi Richards
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
from __future__ import absolute_import
from autopilot.matchers import Eventually
from testtools.matchers import Equals, NotEquals, GreaterThan, MatchesPredicate
from time import sleep
from unity.tests import UnityTestCase
import gettext
class CommandScopeSearchTests(UnityTestCase):
"""Test the command scope search bahavior."""
def setUp(self):
super(CommandScopeSearchTests, self).setUp()
gettext.install("unity-scope-applications")
def tearDown(self):
self.unity.dash.ensure_hidden()
super(CommandScopeSearchTests, self).tearDown()
def wait_for_category(self, scope, group):
"""Method to wait for a specific category"""
get_scope_fn = lambda: scope.get_category_by_name(group)
self.assertThat(get_scope_fn, Eventually(NotEquals(None), timeout=20))
return get_scope_fn()
def test_no_results(self):
"""An empty string should get no results."""
self.unity.dash.reveal_command_scope()
command_scope = self.unity.dash.get_current_scope()
if self.unity.dash.search_string != "":
self.keyboard.press_and_release("Delete")
self.assertThat(self.unity.dash.search_string, Eventually(Equals("")))
results_category = self.wait_for_category(command_scope, _("Results"))
self.assertThat(results_category.is_visible, Eventually(Equals(False)))
def test_results_category_appears(self):
"""Results category must appear when there are some results."""
self.unity.dash.reveal_command_scope()
command_scope = self.unity.dash.get_current_scope()
# lots of apps start with 'a'...
self.keyboard.type("a")
self.assertThat(self.unity.dash.search_string, Eventually(Equals("a")))
results_category = self.wait_for_category(command_scope, _("Results"))
self.assertThat(results_category.is_visible, Eventually(Equals(True)))
def test_result_category_actually_contains_results(self):
"""With a search string of 'a', the results category must contain some results."""
self.unity.dash.reveal_command_scope()
command_scope = self.unity.dash.get_current_scope()
# lots of apps start with 'a'...
self.keyboard.type("a")
self.assertThat(self.unity.dash.search_string, Eventually(Equals("a")))
results_category = self.wait_for_category(command_scope, _("Results"))
self.assertThat(lambda: len(results_category.get_results()), Eventually(GreaterThan(0), timeout=20))
def test_run_before_refresh(self):
"""Hitting enter before view has updated results must run the correct command."""
if self.process_manager.app_is_running("Text Editor"):
self.process_manager.close_all_app("Text Editor")
sleep(1)
self.unity.dash.reveal_command_scope()
self.keyboard.type("g")
sleep(1)
self.keyboard.type("edit", 0.1)
self.keyboard.press_and_release("Enter", 0.1)
self.addCleanup(self.process_manager.close_all_app, "Text Editor")
app_found = self.process_manager.wait_until_application_is_running("gedit.desktop", 5)
self.assertTrue(app_found)
def test_ctrl_tab_switching(self):
"""Pressing Ctrl+Tab after launching command scope must switch to Home scope."""
self.unity.dash.reveal_command_scope()
self.keybinding("dash/lens/next")
self.assertThat(self.unity.dash.active_scope, Eventually(Equals("home.scope")))
def test_ctrl_shift_tab_switching(self):
"""Pressing Ctrl+Shift+Tab after launching command scope must switch to Photos or Social scope (Social can be hidden by default)."""
self.unity.dash.reveal_command_scope()
self.keybinding("dash/lens/prev")
self.assertThat(self.unity.dash.active_scope, Eventually(MatchesPredicate(lambda x: x in ["photos.scope", "social.scope"], '%s is not last scope')))
|
foer/linuxmuster-client-unity
|
tests/autopilot/unity/tests/test_command_lens.py
|
Python
|
gpl-3.0
| 4,262
|
# coding: utf-8
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc & contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: contact@tiger-222.fr (Mickaël Schoentgen)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
import os
import threading
from functools import partial
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
class PollingEmitter(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=os.stat, listdir=os.scandir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def on_thread_start(self):
self._snapshot = self._take_snapshot()
def queue_events(self, timeout):
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
try:
new_snapshot = self._take_snapshot()
except OSError:
self.queue_event(DirDeletedEvent(self.watch.path))
self.stop()
return
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
"""
File system independent observer that polls a directory to detect changes.
"""
def __init__(self, stat, listdir, polling_interval=1):
"""
:param stat: stat function. See ``os.stat`` for details.
:param listdir: listdir function. See ``os.scandir`` for details.
:type polling_interval: float
:param polling_interval: interval in seconds between polling the file system.
"""
emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
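# --- Illustrative usage sketch only (not part of watchdog): PollingObserver
# is a drop-in replacement for the platform-native Observer and is useful on
# file systems that emit no change notifications (e.g. some network mounts).
# The watched path below is a hypothetical example.
#
#   from watchdog.observers.polling import PollingObserver
#   from watchdog.events import LoggingEventHandler
#
#   observer = PollingObserver(timeout=2)  # poll roughly every 2 seconds
#   observer.schedule(LoggingEventHandler(), "/tmp", recursive=True)
#   observer.start()
#   try:
#       observer.join()
#   finally:
#       observer.stop()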
|
snakeleon/YouCompleteMe-x64
|
third_party/ycmd/third_party/watchdog_deps/watchdog/src/watchdog/observers/polling.py
|
Python
|
gpl-3.0
| 4,798
|
from datetime import datetime
from typing import Tuple
import logging
from dns import resolver
from ruxit.api.base_plugin import RemoteBasePlugin
from dynatrace import Dynatrace
from dynatrace.environment_v1.synthetic_third_party import SYNTHETIC_EVENT_TYPE_OUTAGE
log = logging.getLogger(__name__)
class DNSExtension(RemoteBasePlugin):
def initialize(self, **kwargs):
# The Dynatrace API client
self.dt_client = Dynatrace(self.config.get("api_url"), self.config.get("api_token"), log=log, proxies=self.build_proxy_url())
self.executions = 0
self.failures_detected = 0
def build_proxy_url(self):
proxy_address = self.config.get("proxy_address")
proxy_username = self.config.get("proxy_username")
proxy_password = self.config.get("proxy_password")
if proxy_address:
protocol, address = proxy_address.split("://")
proxy_url = f"{protocol}://"
if proxy_username:
proxy_url += proxy_username
if proxy_password:
proxy_url += f":{proxy_password}"
proxy_url += f"@{address}"
return {"https": proxy_url}
return {}
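# Illustrative note only: with hypothetical config values
# proxy_address="https://proxy.local:8080", proxy_username="bob" and
# proxy_password="s3cret", build_proxy_url() returns
# {"https": "https://bob:s3cret@proxy.local:8080"}.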
def query(self, **kwargs) -> None:
log.setLevel(self.config.get("log_level"))
dns_server = self.config.get("dns_server")
host = self.config.get("host")
step_title = f"{host} (DNS: {dns_server})"
test_title = self.config.get("test_name") if self.config.get("test_name") else step_title
location = self.config.get("test_location") if self.config.get("test_location") else "ActiveGate"
location_id = location.replace(" ", "_").lower()
frequency = int(self.config.get("frequency")) if self.config.get("frequency") else 15
failure_count = self.config.get("failure_count", 1)
if self.executions % frequency == 0:
success, response_time = test_dns(dns_server, host)
log.info(f"DNS test, DNS server: {dns_server}, host: {host}, success: {success}, time: {response_time} ")
if not success:
self.failures_detected += 1
if self.failures_detected < failure_count:
log.info(f"The result was: {success}. Attempt {self.failures_detected}/{failure_count}, not reporting yet")
success = True
else:
self.failures_detected = 0
self.dt_client.third_part_synthetic_tests.report_simple_thirdparty_synthetic_test(
engine_name="DNS",
timestamp=datetime.now(),
location_id=location_id,
location_name=location,
test_id=self.activation.entity_id,
test_title=test_title,
step_title=step_title,
schedule_interval=frequency * 60,
success=success,
response_time=response_time,
edit_link=f"#settings/customextension;id={self.plugin_info.name}",
icon_url="https://raw.githubusercontent.com/Dynatrace/dynatrace-api/master/third-party-synthetic/active-gate-extensions/extension-third-party-dns/dns.png",
)
self.dt_client.third_part_synthetic_tests.report_simple_thirdparty_synthetic_test_event(
test_id=self.activation.entity_id,
name=f"DNS lookup failed for {step_title}",
location_id=location_id,
timestamp=datetime.now(),
state="open" if not success else "resolved",
event_type=SYNTHETIC_EVENT_TYPE_OUTAGE,
reason=f"DNS lookup failed for {step_title}",
engine_name="DNS",
)
self.executions += 1
def test_dns(dns_server: str, host: str) -> Tuple[bool, int]:
res = resolver.Resolver(configure=False)
res.nameservers = [dns_server]
res.lifetime = res.timeout = 2
start = datetime.now()
try:
res.query(host, "A")
except Exception as e:
log.error(f"Failed executing the DNS test: {e}")
return False, int((datetime.now() - start).total_seconds() * 1000)
return True, int((datetime.now() - start).total_seconds() * 1000)
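# --- Illustrative sketch only (not part of the extension): exercising the
# module-level test_dns() helper by hand; the DNS server and host values are
# hypothetical examples.
if __name__ == "__main__":
    ok, elapsed_ms = test_dns("8.8.8.8", "example.com")
    print(f"lookup ok={ok}, took {elapsed_ms} ms")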
|
ruxit/data-export-api
|
third-party-synthetic/active-gate-extensions/extension-third-party-dns/src/dns_extension.py
|
Python
|
bsd-3-clause
| 4,210
|
# -*- coding: utf-8 -*-
import sys
from django.db import migrations, transaction
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.db.models import F
def set_initial_value_of_is_private_flag(
apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
UserMessage = apps.get_model("zerver", "UserMessage")
Message = apps.get_model("zerver", "Message")
if not Message.objects.exists():
return
i = 0
# Total is only used for the progress bar
total = Message.objects.filter(recipient__type__in=[1, 3]).count()
processed = 0
print("\nStart setting initial value for is_private flag...")
sys.stdout.flush()
while True:
range_end = i + 10000
# Can't use [Recipient.PERSONAL, Recipient.HUDDLE] in migration files
message_ids = list(Message.objects.filter(recipient__type__in=[1, 3],
id__gt=i,
id__lte=range_end).values_list("id", flat=True).order_by("id"))
count = UserMessage.objects.filter(message_id__in=message_ids).update(flags=F('flags').bitor(UserMessage.flags.is_private))
if count == 0 and range_end >= Message.objects.last().id:
break
i = range_end
processed += len(message_ids)
if total != 0:
percent = round((processed / total) * 100, 2)
else:
percent = 100.00
print("Processed %s/%s %s%%" % (processed, total, percent))
sys.stdout.flush()
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0181_userprofile_change_emojiset'),
]
operations = [
migrations.RunPython(set_initial_value_of_is_private_flag,
reverse_code=migrations.RunPython.noop),
]
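# Illustrative note only (not part of this migration): the batched loop above
# pushes the flag change into the database as a bitwise OR. For a hypothetical
# model with an integer 'flags' column, the same pattern is:
#
#   Model.objects.filter(id__in=ids).update(flags=F('flags').bitor(1 << 4))
#
# which sets bit 4 on every matched row in a single UPDATE statement instead
# of round-tripping each row through Python.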
|
jackrzhang/zulip
|
zerver/migrations/0182_set_initial_value_is_private_flag.py
|
Python
|
apache-2.0
| 1,962
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
# Bring everything from c++ module to global namespace.
from moose._moose import *
# Bring everything from moose.py to global namespace.
# IMP: It will overwrite any c++ function with the same name. We can override
# some C++ here.
from moose.moose import *
from moose.server import *
# SBML and NML2 support.
from moose.model_utils import *
# create a shorthand for version() call here.
__version__ = version()
# C++ core override
from moose.wrapper import *
# Import moose test.
from moose.moose_test import test
|
BhallaLab/moose-core
|
pymoose/__init__.py
|
Python
|
gpl-3.0
| 613
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Environment.migrate_environment'
db.add_column(u'physical_environment', 'migrate_environment',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'migrate_to', null=True, to=orm['physical.Environment']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Environment.migrate_environment'
db.delete_column(u'physical_environment', 'migrate_environment_id')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
globocom/database-as-a-service
|
dbaas/physical/migrations/0049_auto__add_field_environment_migrate_environment.py
|
Python
|
bsd-3-clause
| 16,738
|
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
# Declaration of Username and Password
username = 'admin'
password = 'Test1234$'
"""
Method to replicate
~~~~~~~~~~~~~~~~~~~
1. Bring up a new instance of NearBeach (grab from fixtures)
2. Try and log in as the admin user
Expected Results
~~~~~~~~~~~~~~~~
User will log in with no issues, system will create all of the user's permission sets and groups
"""
def login_user(c: object, self: object) -> object:
response = c.post(
reverse('login'),
self.credentials,
follow=True,
)
self.assertTrue(response.context['user'].is_active)
class NewInstanceLoginTest(TestCase):
fixtures = ['NearBeach_no_setup.json']
def setUp(self):
self.credentials = {
'username': username,
'password': password
}
def test_admin_login(self):
c = Client()
# User will be logged in
login_user(c, self)
# Make sure the admin user can open up the project
response = c.get(reverse('dashboard'))
self.assertEqual(response.status_code, 200)
|
robotichead/NearBeach
|
NearBeach/tests/tests_specific_bugs/test_new_instance.py
|
Python
|
mit
| 1,174
|
# sql/compiler.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import itertools
import re
from . import coercions
from . import crud
from . import elements
from . import functions
from . import operators
from . import roles
from . import schema
from . import selectable
from . import sqltypes
from .. import exc
from .. import util
RESERVED_WORDS = set(
[
"all",
"analyse",
"analyze",
"and",
"any",
"array",
"as",
"asc",
"asymmetric",
"authorization",
"between",
"binary",
"both",
"case",
"cast",
"check",
"collate",
"column",
"constraint",
"create",
"cross",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_user",
"default",
"deferrable",
"desc",
"distinct",
"do",
"else",
"end",
"except",
"false",
"for",
"foreign",
"freeze",
"from",
"full",
"grant",
"group",
"having",
"ilike",
"in",
"initially",
"inner",
"intersect",
"into",
"is",
"isnull",
"join",
"leading",
"left",
"like",
"limit",
"localtime",
"localtimestamp",
"natural",
"new",
"not",
"notnull",
"null",
"off",
"offset",
"old",
"on",
"only",
"or",
"order",
"outer",
"overlaps",
"placing",
"primary",
"references",
"right",
"select",
"session_user",
"set",
"similar",
"some",
"symmetric",
"table",
"then",
"to",
"trailing",
"true",
"union",
"unique",
"user",
"using",
"verbose",
"when",
"where",
]
)
LEGAL_CHARACTERS = re.compile(r"^[A-Z0-9_$]+$", re.I)
LEGAL_CHARACTERS_PLUS_SPACE = re.compile(r"^[A-Z0-9_ $]+$", re.I)
ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(["$"])
FK_ON_DELETE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_ON_UPDATE = re.compile(
r"^(?:RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT)$", re.I
)
FK_INITIALLY = re.compile(r"^(?:DEFERRED|IMMEDIATE)$", re.I)
BIND_PARAMS = re.compile(r"(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])", re.UNICODE)
BIND_PARAMS_ESC = re.compile(r"\x5c(:[\w\$]*)(?![:\w\$])", re.UNICODE)
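# Illustrative note only (not part of SQLAlchemy): how the two regexes above
# treat raw text() SQL. A ':name' token is a bind parameter unless escaped
# with a backslash:
#
#   BIND_PARAMS.findall(r"x = :x AND t = '10\:30'")             # -> ['x']
#   BIND_PARAMS_ESC.sub(lambda m: m.group(1), r"t = '10\:30'")  # -> "t = '10:30'"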
BIND_TEMPLATES = {
"pyformat": "%%(%(name)s)s",
"qmark": "?",
"format": "%%s",
"numeric": ":[_POSITION]",
"named": ":%(name)s",
}
OPERATORS = {
# binary
operators.and_: " AND ",
operators.or_: " OR ",
operators.add: " + ",
operators.mul: " * ",
operators.sub: " - ",
operators.div: " / ",
operators.mod: " % ",
operators.truediv: " / ",
operators.neg: "-",
operators.lt: " < ",
operators.le: " <= ",
operators.ne: " != ",
operators.gt: " > ",
operators.ge: " >= ",
operators.eq: " = ",
operators.is_distinct_from: " IS DISTINCT FROM ",
operators.isnot_distinct_from: " IS NOT DISTINCT FROM ",
operators.concat_op: " || ",
operators.match_op: " MATCH ",
operators.notmatch_op: " NOT MATCH ",
operators.in_op: " IN ",
operators.notin_op: " NOT IN ",
operators.comma_op: ", ",
operators.from_: " FROM ",
operators.as_: " AS ",
operators.is_: " IS ",
operators.isnot: " IS NOT ",
operators.collate: " COLLATE ",
# unary
operators.exists: "EXISTS ",
operators.distinct_op: "DISTINCT ",
operators.inv: "NOT ",
operators.any_op: "ANY ",
operators.all_op: "ALL ",
# modifiers
operators.desc_op: " DESC",
operators.asc_op: " ASC",
operators.nullsfirst_op: " NULLS FIRST",
operators.nullslast_op: " NULLS LAST",
}
FUNCTIONS = {
functions.coalesce: "coalesce",
functions.current_date: "CURRENT_DATE",
functions.current_time: "CURRENT_TIME",
functions.current_timestamp: "CURRENT_TIMESTAMP",
functions.current_user: "CURRENT_USER",
functions.localtime: "LOCALTIME",
functions.localtimestamp: "LOCALTIMESTAMP",
functions.random: "random",
functions.sysdate: "sysdate",
functions.session_user: "SESSION_USER",
functions.user: "USER",
functions.cube: "CUBE",
functions.rollup: "ROLLUP",
functions.grouping_sets: "GROUPING SETS",
}
EXTRACT_MAP = {
"month": "month",
"day": "day",
"year": "year",
"second": "second",
"hour": "hour",
"doy": "doy",
"minute": "minute",
"quarter": "quarter",
"dow": "dow",
"week": "week",
"epoch": "epoch",
"milliseconds": "milliseconds",
"microseconds": "microseconds",
"timezone_hour": "timezone_hour",
"timezone_minute": "timezone_minute",
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: "UNION",
selectable.CompoundSelect.UNION_ALL: "UNION ALL",
selectable.CompoundSelect.EXCEPT: "EXCEPT",
selectable.CompoundSelect.EXCEPT_ALL: "EXCEPT ALL",
selectable.CompoundSelect.INTERSECT: "INTERSECT",
selectable.CompoundSelect.INTERSECT_ALL: "INTERSECT ALL",
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
execution_options = util.immutabledict()
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
def __init__(
self,
dialect,
statement,
bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict(),
):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.preparer = self.preparer._with_schema_translate(
schema_translate_map
)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated(
"0.7",
"The :meth:`.Compiled.compile` method is deprecated and will be "
"removed in a future release. The :class:`.Compiled` object "
"now runs its compilation within the constructor, and this method "
"does nothing.",
)
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
if self.can_execute:
return connection._execute_compiled(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self.statement)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ""
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.",
code="2afi",
)
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
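# Illustrative usage sketch only (not part of SQLAlchemy): a Compiled object
# is normally obtained via ClauseElement.compile(); str() then yields the SQL
# text. The table and column below are hypothetical.
#
#   from sqlalchemy import table, column, select
#   stmt = select([column('x')]).select_from(table('t')).where(column('x') > 5)
#   compiled = stmt.compile()   # a SQLCompiler instance
#   str(compiled)               # "SELECT x FROM t WHERE x > :x_1" (modulo whitespace)
#   compiled.params             # {'x_1': 5}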
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r"visit_\w+"
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
# this was a Visitable, but to allow accurate detection of
# column elements this is actually a column element
class _CompileLabel(elements.ColumnElement):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = "label"
__slots__ = "element", "name"
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
contains_expanding_parameters = False
"""True if we've encountered bindparam(..., expanding=True).
These need to be converted before execution time against the
string statement.
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to False if the target
driver/DB enforces this
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextualSelect.
"""
_numeric_binds = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
insert_prefetch = update_prefetch = ()
def __init__(
self, dialect, statement, column_keys=None, inline=False, **kwargs
):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, "inline", False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = (
dialect.label_length or dialect.max_identifier_length
)
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and self._numeric_binds:
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns,
self._ordered_columns,
)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry["need_result_map_for_nested"] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop("need_result_map_for_nested")
self._result_columns, self._ordered_columns = (
result_columns,
ordered_columns,
)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r"\[_POSITION\]", lambda m: str(util.next(poscount)), self.string
)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value)
for key, value in (
(
self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect),
)
for bindparam in self.bind_names
)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d"
% (bindparam.key, _group_number),
code="cd3x",
)
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key,
code="cd3x",
)
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if (
order_by_elem is not None
and order_by_elem.name in resolve_dict
and order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]
)
):
kwargs[
"render_label_as_label"
] = element.element._order_by_label_element
return self.process(
element.element,
within_columns_clause=within_columns_clause,
**kwargs
)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs
):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(element._text_clause)
selectable = self.stack[-1]["selectable"]
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError:
coercions._no_text_coercion(
element.element,
extra="Can't resolve label reference for ORDER BY / GROUP BY.",
exc_cls=exc.CompileError,
)
else:
kwargs["render_label_as_label"] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs
)
def visit_label(
self,
label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw
):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (
within_columns_clause and not within_label_clause
)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname) + label._alt_names,
label.type,
)
return (
label.element._compiler_dispatch(
self,
within_columns_clause=True,
within_label_clause=True,
**kw
)
+ OPERATORS[operators.as_]
+ self.preparer.format_label(label, labelname)
)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw
)
def _fallback_column_name(self, column):
raise exc.CompileError(
"Cannot compile Column object until " "its 'name' is assigned."
)
def visit_column(
self, column, add_to_result_map=None, include_table=True, **kwargs
):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name, orig_name, (column, name, column.key), column.type
)
if is_literal:
# note we are not currently accommodating for
# literal_column(quoted_name('ident', True)) here
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = (
self.preparer.quote_schema(effective_schema) + "."
)
else:
schema_prefix = ""
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + self.preparer.quote(tablename) + "." + name
def visit_collation(self, element, **kw):
return self.preparer.format_collation(element.collation)
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw["type_expression"] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace("%", "%%")
return text
def visit_textclause(self, textclause, **kw):
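# e.g. text("x = :x AND y = :y") renders each :name either as the
# corresponding bound parameter's SQL or via bindparam_string(); an
# escaped "\:" sequence is un-escaped back to a literal ":".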
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam, self.post_process_text(textclause.text)
),
)
def visit_textual_select(
self, taf, compound_index=None, asfrom=False, **kw
):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = (
toplevel
or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
or entry.get("need_result_map_for_nested", False)
)
if populate_result_map:
self._ordered_columns = (
self._textual_ordered_columns
) = taf.positional
for c in taf.column_args:
self.process(
c,
within_columns_clause=True,
add_to_result_map=self._add_to_result_map,
)
return self.process(taf.element, **kw)
def visit_null(self, expr, **kw):
return "NULL"
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "true"
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return "false"
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
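# e.g. and_(a, b, c) arrives as a ClauseList whose operator maps to
# " AND " in OPERATORS; empty sub-clauses are filtered out of the join.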
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
text = sep.join(
s
for s in (
c._compiler_dispatch(self, **kw) for c in clauselist.clauses
)
if s
)
if clauselist._tuple_values and self.dialect.tuple_in_values:
text = "VALUES " + text
return text
def visit_case(self, clause, **kwargs):
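# Handles both the "simple" form, CASE <value> WHEN ... END, and the
# "searched" form, CASE WHEN <cond> THEN <result> ... END.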
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += (
"WHEN "
+ cond._compiler_dispatch(self, **kwargs)
+ " THEN "
+ result._compiler_dispatch(self, **kwargs)
+ " "
)
if clause.else_ is not None:
x += (
"ELSE " + clause.else_._compiler_dispatch(self, **kwargs) + " "
)
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % (
cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs),
)
def _format_frame_clause(self, range_, **kw):
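# range_ is a (lower, upper) 2-tuple: negative bounds render as
# "<n> PRECEDING" and positive ones as "<n> FOLLOWING" (magnitudes are
# emitted as bound literals), while the RANGE_UNBOUNDED / RANGE_CURRENT
# sentinels map to UNBOUNDED PRECEDING/FOLLOWING and CURRENT ROW.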
return "%s AND %s" % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[0])), **kw),)
if range_[0] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[0]), **kw),),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW"
if range_[1] is elements.RANGE_CURRENT
else "%s PRECEDING"
% (self.process(elements.literal(abs(range_[1])), **kw),)
if range_[1] < 0
else "%s FOLLOWING"
% (self.process(elements.literal(range_[1]), **kw),),
)
def visit_over(self, over, **kwargs):
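# e.g. func.row_number().over(partition_by=t.c.x, order_by=t.c.y)
# compiles to "row_number() OVER (PARTITION BY x ORDER BY y)", with an
# optional RANGE/ROWS frame clause appended when one is present.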
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs
)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs
)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
" ".join(
[
"%s BY %s"
% (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
("PARTITION", over.partition_by),
("ORDER", over.order_by),
)
if clause is not None and len(clause)
]
+ ([range_] if range_ else [])
),
)
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs),
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs),
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field,
extract.expr._compiler_dispatch(self, **kwargs),
)
def visit_function(self, func, add_to_result_map=None, **kwargs):
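# e.g. func.count(t.c.id) renders "count(id)"; known generic functions
# come from the FUNCTIONS lookup, while ad-hoc names render as
# "<name>(<args>)", quoted only if they contain illegal characters.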
if add_to_result_map is not None:
add_to_result_map(func.name, func.name, (), func.type)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, None)
if name:
if func._has_args:
name += "%(expr)s"
else:
name = func.name
name = (
self.preparer.quote(name)
if self.preparer._requires_quotes_illegal_chars(name)
else name
)
name = name + "%(expr)s"
return ".".join(
[
(
self.preparer.quote(tok)
if self.preparer._requires_quotes_illegal_chars(tok)
else tok
)
for tok in func.packagenames
]
+ [name]
) % {"expr": self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence, **kw):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments."
% self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(
self, cs, asfrom=False, compound_index=0, **kwargs
):
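# e.g. union(s1, s2) joins the member SELECTs with the keyword from
# compound_keywords ("UNION", "UNION ALL", ...); only the first member
# (compound_index == 0) populates the shared result map.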
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or (
compound_index == 0
and entry.get("need_result_map_for_compound", False)
)
self.stack.append(
{
"correlate_froms": entry["correlate_froms"],
"asfrom_froms": entry["asfrom_froms"],
"selectable": cs,
"need_result_map_for_compound": need_result_map,
}
)
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(
c._compiler_dispatch(
self, asfrom=asfrom, compound_index=i, **kwargs
)
for i, c in enumerate(cs.selects)
)
)
kwargs["include_table"] = False
text += self.group_by_clause(cs, **dict(asfrom=asfrom, **kwargs))
text += self.order_by_clause(cs, **kwargs)
text += (
(cs._limit_clause is not None or cs._offset_clause is not None)
and self.limit_clause(cs, **kwargs)
or ""
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
return text
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__,
qualifier1,
"_" + qualifier2 if qualifier2 else "",
)
return getattr(self, attrname, None)
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously"
)
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator"
)
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw
)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier"
)
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw
)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier"
)
def visit_istrue_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if (
element._is_implicitly_boolean
or self.dialect.supports_native_boolean
):
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op
)
def _emit_empty_in_warning(self):
util.warn(
"The IN-predicate was invoked with an "
"empty sequence. This results in a "
"contradiction, which nonetheless can be "
"expensive to evaluate. Consider alternative "
"strategies for improved performance."
)
def visit_empty_in_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 != 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left != binary.left)
def visit_empty_notin_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 = 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left == binary.left)
def visit_empty_set_expr(self, element_types):
raise NotImplementedError(
"Dialect '%s' does not support empty set expression."
% self.dialect.name
)
def visit_binary(
self, binary, override_operator=None, eager_grouping=False, **kw
):
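# Dispatches to a visit_<opname>_binary method when one is defined,
# otherwise renders "left <op> right" using the OPERATORS table; under
# ansi_bind_rules, bind parameters on both sides are forced to literal
# rendering so that "? = ?" never reaches the database.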
# don't allow "? = ?" to render
if (
self.ansi_bind_rules
and isinstance(binary.left, elements.BindParameter)
and isinstance(binary.right, elements.BindParameter)
):
kw["literal_binds"] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator_)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_function_as_comparison_op_binary(self, element, operator, **kw):
return self.process(element.sql_function, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return (
self.process(binary.left, **kw)
+ " %% "
+ self.process(binary.right, **kw)
)
else:
return (
self.process(binary.left, **kw)
+ " % "
+ self.process(binary.right, **kw)
)
def visit_custom_op_binary(self, element, operator, **kw):
kw["eager_grouping"] = operator.eager_grouping
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw
)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw
)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw
)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw
):
_in_binary = kw.get("_in_binary", False)
kw["_in_binary"] = True
text = (
binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
+ opstring
+ binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw
)
)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
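# e.g. col.like("a/%b", escape="/") compiles to
# "col LIKE :param_1 ESCAPE '/'".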
escape = binary.modifiers.get("escape", None)
# append the optional ESCAPE clause via a conditional expression
return "%s LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "%s NOT LIKE %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return "lower(%s) NOT LIKE lower(%s)" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
) + (
" ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape
else ""
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC " if symmetric else " BETWEEN ", **kw
)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary,
" NOT BETWEEN SYMMETRIC " if symmetric else " NOT BETWEEN ",
**kw
)
def visit_bindparam(
self,
bindparam,
within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs
):
if not skip_bind_expression:
impl = bindparam.type.dialect_impl(self.dialect)
if impl._has_bind_expression:
bind_expression = impl.bind_expression(bindparam)
return self.process(
bind_expression,
skip_bind_expression=True,
within_columns_clause=within_columns_clause,
literal_binds=literal_binds,
**kwargs
)
if literal_binds or (within_columns_clause and self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError(
"Bind parameter '%s' without a "
"renderable value not allowed here." % bindparam.key
)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs
)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (
existing.unique or bindparam.unique
) and not existing.proxy_set.intersection(bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name"
% bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')."
% (bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(
name, expanding=bindparam.expanding, **kwargs
)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value
)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
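# Names longer than label_length - 6 are cut and suffixed with
# "_<hex counter>" so generated identifiers stay unique while fitting
# the dialect's identifier length limit.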
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = (
anonname[0 : max(self.label_length - 6, 0)]
+ "_"
+ hex(counter)[2:]
)
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
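# Anonymous keys have the form "<id> <derived>"; e.g. "12 foo" maps to
# "foo_1", and the next distinct id sharing "foo" maps to "foo_2".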
(ident, derived) = key.split(" ", 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(
self, name, positional_names=None, expanding=False, **kw
):
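# For positional dialects the name is recorded in positiontup and the
# paramstyle template (e.g. "?" or "%s") is returned; "expanding" IN
# parameters emit a placeholder that is rewritten at execution time.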
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
if expanding:
self.contains_expanding_parameters = True
return "([EXPANDING_%s])" % name
else:
return self.bindtemplate % {"name": name}
def visit_cte(
self,
cte,
asfrom=False,
ashint=False,
fromhints=None,
visiting_cte=None,
**kwargs
):
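# Each CTE body is rendered once into self.ctes, keyed by construct;
# _render_cte_clause() later prepends the combined
# "WITH [RECURSIVE] name AS (...)" preamble to the statement text.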
self._init_cte_state()
kwargs["visiting_cte"] = cte
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
is_new_cte = True
embedded_in_current_named_cte = False
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
embedded_in_current_named_cte = visiting_cte is existing_cte
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
is_new_cte = False
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" % cte_name
)
if asfrom or is_new_cte:
if cte._cte_alias is not None:
pre_alias_cte = cte._cte_alias
cte_pre_alias_name = cte._cte_alias.name
if isinstance(cte_pre_alias_name, elements._truncated_label):
cte_pre_alias_name = self._truncated_identifier(
"alias", cte_pre_alias_name
)
else:
pre_alias_cte = cte
cte_pre_alias_name = None
if is_new_cte:
self.ctes_by_name[cte_name] = cte
if (
"autocommit" in cte.element._execution_options
and "autocommit" not in self.execution_options
):
self.execution_options = self.execution_options.union(
{
"autocommit": cte.element._execution_options[
"autocommit"
]
}
)
if pre_alias_cte not in self.ctes:
self.visit_cte(pre_alias_cte, **kwargs)
if not cte_pre_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.element, selectable.Select):
col_source = cte.element
elif isinstance(cte.element, selectable.CompoundSelect):
col_source = cte.element.selects[0]
else:
assert False
recur_cols = [
c
for c in util.unique_list(col_source.inner_columns)
if c is not None
]
text += "(%s)" % (
", ".join(
self.preparer.format_column(ident)
for ident in recur_cols
)
)
if self.positional:
kwargs["positional_names"] = self.cte_positional[cte] = []
assert kwargs.get("subquery", False) is False
text += " AS \n(%s)" % (
cte.element._compiler_dispatch(
self, asfrom=True, **kwargs
),
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs
)
self.ctes[cte] = text
if asfrom:
if not is_new_cte and embedded_in_current_named_cte:
return self.preparer.format_alias(cte, cte_name)
if cte_pre_alias_name:
text = self.preparer.format_alias(cte, cte_pre_alias_name)
if self.preparer._requires_quotes(cte_name):
cte_name = self.preparer.quote(cte_name)
text += self.get_render_as_alias_suffix(cte_name)
return text
else:
return self.preparer.format_alias(cte, cte_name)
def visit_alias(
self,
alias,
asfrom=False,
ashint=False,
iscrud=False,
fromhints=None,
subquery=False,
lateral=False,
enclosing_alias=None,
**kwargs
):
if enclosing_alias is not None and enclosing_alias.element is alias:
inner = alias.element._compiler_dispatch(
self,
asfrom=asfrom,
ashint=ashint,
iscrud=iscrud,
fromhints=fromhints,
lateral=lateral,
enclosing_alias=alias,
**kwargs
)
if subquery and (asfrom or lateral):
inner = "(%s)" % (inner,)
return inner
else:
enclosing_alias = kwargs["enclosing_alias"] = alias
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
inner = alias.element._compiler_dispatch(
self, asfrom=True, lateral=lateral, **kwargs
)
if subquery:
inner = "(%s)" % (inner,)
ret = inner + self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name)
)
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(
ret, alias, fromhints[alias], iscrud
)
return ret
else:
# note we cancel the "subquery" flag here as well
return alias.element._compiler_dispatch(
self, lateral=lateral, **kwargs
)
def visit_subquery(self, subquery, **kw):
kw["subquery"] = True
return self.visit_alias(subquery, **kw)
def visit_lateral(self, lateral, **kw):
kw["lateral"] = True
return "LATERAL %s" % self.visit_alias(lateral, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw),
)
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw)
)
return text
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(
self,
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=None,
within_columns_clause=True,
need_column_expressions=False,
):
"""produce labeled columns present in a select()."""
impl = column.type.dialect_impl(self.dialect)
if impl._has_column_expression and (
need_column_expressions or populate_result_map
):
col_expr = impl.column_expression(column)
if populate_result_map:
def add_to_result_map(keyname, name, objects, type_):
self._add_to_result_map(
keyname, name, (column,) + objects, type_
)
else:
add_to_result_map = None
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr, column.name, alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr, name, alt_names=(column._key_label,)
)
elif (
asfrom
and isinstance(column, elements.ColumnClause)
and not column.is_literal
and column.table is not None
and not isinstance(column.table, selectable.Select)
):
result_expr = _CompileLabel(
col_expr,
coercions.expect(roles.TruncatedLabelRole, column.name),
alt_names=(column.key,),
)
elif (
not isinstance(column, elements.TextClause)
and (
not isinstance(column, elements.UnaryExpression)
or column.wraps_column_expression
)
and (
not hasattr(column, "name")
or isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(
col_expr,
coercions.expect(roles.TruncatedLabelRole, column.name),
alt_names=(column.key,),
)
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map,
)
return result_expr._compiler_dispatch(self, **column_clause_args)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(e.g. SQLite < 3.7.16).
"""
cloned = {}
column_translate = [{}]
created = set()
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if (
newelem._is_from_clause
and newelem._is_join
and isinstance(newelem.right, selectable.FromGrouping)
):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element], use_labels=True
).alias()
created.add(selectable_)
created.update(selectable_.c)
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw["transform_clue"] = "select_container"
newelem._copy_internals(clone=visit, **kw)
elif newelem._is_returns_rows and newelem._is_select_statement:
barrier_select = (
kw.get("transform_clue", None) == "select_container"
)
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw["transform_clue"] = "inside_select"
if not newelem._is_select_container:
froms = newelem.froms
newelem._raw_columns = list(newelem.selected_columns)
newelem._from_obj.update(froms)
newelem._reset_memoizations()
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
def _transform_result_map_for_nested_joins(
self, select, transformed_select
):
self._result_columns[:] = [
result_rec
if col is tcol
else (
result_rec[0],
name,
tuple([col if obj is tcol else obj for obj in result_rec[2]]),
result_rec[3],
)
for result_rec, (name, col), (tname, tcol) in zip(
self._result_columns,
select._columns_plus_names,
transformed_select._columns_plus_names,
)
]
# TODO: it's not anticipated that we need to correct anon_map
# however if we do, this is what it looks like:
# for (name, col), (tname, tcol) in zip(
# select._columns_plus_names,
# transformed_select._columns_plus_names,
# ):
# if isinstance(name, elements._anonymous_label) and name != tname:
# m1 = re.match(r"^%\((\d+ .+?)\)s$", name)
# m2 = re.match(r"^%\((\d+ .+?)\)s$", tname)
# self.anon_map[m1.group(1)] = self.anon_map[m2.group(1)]
_default_stack_entry = util.immutabledict(
[("correlate_froms", frozenset()), ("asfrom_froms", frozenset())]
)
def _display_froms_for_select(self, select, asfrom, lateral=False):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the Oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
return froms
def visit_select(
self,
select,
asfrom=False,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
lateral=False,
**kwargs
):
needs_nested_translation = (
select.use_labels
and not nested_join_translation
and not self.stack
and not self.dialect.supports_right_nested_joins
)
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select
)
text = self.visit_select(
transformed_select,
asfrom=asfrom,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True,
**kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = need_column_expressions = (
toplevel
or entry.get("need_result_map_for_compound", False)
or entry.get("need_result_map_for_nested", False)
)
if compound_index > 0:
populate_result_map = False
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and "add_to_result_map" in kwargs:
del kwargs["add_to_result_map"]
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select
)
return text
froms = self._setup_select_stack(select, entry, asfrom, lateral)
column_clause_args = kwargs.copy()
column_clause_args.update(
{"within_label_clause": False, "within_columns_clause": False}
)
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c
for c in [
self._label_select_column(
select,
column,
populate_result_map,
asfrom,
column_clause_args,
name=name,
need_column_expressions=need_column_expressions,
)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[name for (key, name) in select._columns_plus_names],
[
name
for (key, name) in select_wraps_for._columns_plus_names
],
)
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs
)
if select._statement_hints:
per_dialect = [
ht
for (dialect_name, ht) in select._statement_hints
if dialect_name in ("*", self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs
)
self.stack.pop(-1)
return text
def _setup_select_hints(self, select):
byfrom = dict(
[
(
from_,
hinttext
% {"name": from_._compiler_dispatch(self, ashint=True)},
)
for (from_, dialect), hinttext in select._hints.items()
if dialect in ("*", self.dialect.name)
]
)
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom, lateral):
correlate_froms = entry["correlate_froms"]
asfrom_froms = entry["asfrom_froms"]
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms
),
implicit_correlate_froms=(),
)
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms,
)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
"asfrom_froms": new_correlate_froms,
"correlate_froms": all_correlate_froms,
"selectable": select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs
):
text += ", ".join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ", ".join(
[
f._compiler_dispatch(
self, asfrom=True, fromhints=byfrom, **kwargs
)
for f in froms
]
)
else:
text += ", ".join(
[
f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms
]
)
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
text += self.group_by_clause(select, **kwargs)
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (
select._limit_clause is not None
or select._offset_clause is not None
):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
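# Positional parameters collected per-CTE are spliced in ahead of the
# statement's own positiontup so placeholder order matches the final
# "WITH ... <statement>" text.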
if self.positional:
self.positiontup = (
sum([self.cte_positional[cte] for cte in self.ctes], [])
+ self.positiontup
)
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join([txt for txt in self.ctes.values()])
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def group_by_clause(self, select, **kw):
"""allow dialects to customize how GROUP BY is rendered."""
group_by = select._group_by_clause._compiler_dispatch(self, **kw)
if group_by:
return " GROUP BY " + group_by
else:
return ""
def order_by_clause(self, select, **kw):
"""allow dialects to customize how ORDER BY is rendered."""
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler."
)
def limit_clause(self, select, **kw):
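# An OFFSET without a LIMIT first emits "LIMIT -1", a convention
# (accepted by SQLite, for example) meaning "no limit".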
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def visit_table(
self,
table,
asfrom=False,
iscrud=False,
ashint=False,
fromhints=None,
use_schema=True,
**kwargs
):
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = (
self.preparer.quote_schema(effective_schema)
+ "."
+ self.preparer.quote(table.name)
)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(
ret, table, fromhints[table], iscrud
)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs)
+ join_type
+ join.right._compiler_dispatch(self, asfrom=True, **kwargs)
+ " ON "
# TODO: likely need asfrom=True here?
+ join.onclause._compiler_dispatch(self, **kwargs)
)
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict(
[
(table, hint_text)
for (table, dialect), hint_text in stmt._hints.items()
if dialect in ("*", self.dialect.name)
]
)
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text, stmt.table, dialect_hints[stmt.table], True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, **kw):
toplevel = not self.stack
self.stack.append(
{
"correlate_froms": set(),
"asfrom_froms": set(),
"selectable": insert_stmt,
}
)
crud_params = crud._setup_crud_params(
self, insert_stmt, crud.ISINSERT, **kw
)
if (
not crud_params
and not self.dialect.supports_default_values
and not self.dialect.supports_empty_insert
):
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support empty "
"inserts." % self.dialect.name
)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." % self.dialect.name
)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(
insert_stmt, insert_stmt._prefixes, **kw
)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
_, table_text = self._setup_crud_hints(insert_stmt, table_text)
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ", ".join(
[preparer.format_column(c[0]) for c in crud_params_single]
)
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning
)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
select_text = self.process(self._insert_from_select, **kw)
if self.ctes and toplevel and self.dialect.cte_follows_insert:
text += " %s%s" % (self._render_cte_clause(), select_text)
else:
text += " %s" % select_text
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (", ".join(c[1] for c in crud_param_set))
for crud_param_set in crud_params
)
)
else:
text += " VALUES (%s)" % ", ".join([c[1] for c in crud_params])
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw
)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and toplevel and not self.dialect.cte_follows_insert:
text = self._render_cte_clause() + text
self.stack.pop(-1)
return text
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw["asfrom"] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE"
)
def visit_update(self, update_stmt, **kw):
toplevel = not self.stack
extra_froms = update_stmt._extra_froms
is_multitable = bool(extra_froms)
if is_multitable:
# main table might be a JOIN
main_froms = set(selectable._from_objects(update_stmt.table))
render_extra_froms = [
f for f in extra_froms if f not in main_froms
]
correlate_froms = main_froms.union(extra_froms)
else:
render_extra_froms = []
correlate_froms = {update_stmt.table}
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": update_stmt,
}
)
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(
update_stmt, update_stmt._prefixes, **kw
)
table_text = self.update_tables_clause(
update_stmt, update_stmt.table, render_extra_froms, **kw
)
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, **kw
)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text
)
else:
dialect_hints = None
text += table_text
text += " SET "
include_table = (
is_multitable and self.render_table_with_column_in_update_from
)
text += ", ".join(
c[0]._compiler_dispatch(self, include_table=include_table)
+ "="
+ c[1]
for c in crud_params
)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
render_extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (
self.returning or update_stmt._returning
) and not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self, self.statement)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE"
)
def delete_table_clause(self, delete_stmt, from_table, extra_froms):
return from_table._compiler_dispatch(self, asfrom=True, iscrud=True)
def visit_delete(self, delete_stmt, **kw):
toplevel = not self.stack
crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw)
extra_froms = delete_stmt._extra_froms
correlate_froms = {delete_stmt.table}.union(extra_froms)
self.stack.append(
{
"correlate_froms": correlate_froms,
"asfrom_froms": correlate_froms,
"selectable": delete_stmt,
}
)
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(
delete_stmt, delete_stmt._prefixes, **kw
)
text += "FROM "
table_text = self.delete_table_clause(
delete_stmt, delete_stmt.table, extra_froms
)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text
)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints,
**kw
)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning
)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % self.preparer.format_savepoint(
savepoint_stmt
)
class StrSQLCompiler(SQLCompiler):
"""A :class:`.SQLCompiler` subclass which allows a small selection
of non-standard SQL features to render into a string value.
The :class:`.StrSQLCompiler` is invoked whenever a Core expression
element is directly stringified without calling upon the
:meth:`.ClauseElement.compile` method. It can render a limited set
of non-standard SQL constructs to assist in basic stringification,
however for more substantial custom or dialect-specific SQL constructs,
it will be necessary to make use of :meth:`.ClauseElement.compile`
directly.
.. seealso::
:ref:`faq_sql_expression_string`
"""
def _fallback_column_name(self, column):
return "<name unknown>"
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "<next sequence value: %s>" % self.preparer.format_sequence(seq)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in elements._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
def update_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return "FROM " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
def delete_extra_from_clause(
self, update_stmt, from_table, extra_froms, from_hints, **kw
):
return ", " + ", ".join(
t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
for t in extra_froms
)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ""
else:
table, sch = path[-1], path[0]
context.setdefault("table", table)
context.setdefault("schema", sch)
context.setdefault("fullname", preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE " + preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(
create_column, first_pk=column.primary_key and not first_pk
)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s")
% (table.description, column.name, ce.args[0])
)
)
const = self.create_table_constraints(
table,
_include_foreign_key_constraints=create.include_foreign_key_constraints, # noqa
)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(column, first_pk=first_pk)
const = " ".join(
self.process(constraint) for constraint in column.constraints
)
if const:
text += " " + const
return text
def create_table_constraints(
self, table, _include_foreign_key_constraints=None
):
# On some databases the order is significant: visit the PK first, then
# the other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend(
[
c
for c in table._sorted_constraints
if c is not table.primary_key and c not in omit_fkcs
]
)
return ", \n\t".join(
p
for p in (
self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None
or constraint._create_rule(self)
)
and (
not self.dialect.supports_alter
or not getattr(constraint, "use_alter", False)
)
)
if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError(
"Index '%s' is not associated " "with any table." % index.name
)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(
index.table, use_schema=include_table_schema
),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True
)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
index_name = self.preparer.format_index(index)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element),
)
def visit_set_table_comment(self, create):
return "COMMENT ON TABLE %s IS %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table(
drop.element
)
def visit_set_column_comment(self, create):
return "COMMENT ON COLUMN %s IS %s" % (
self.preparer.format_column(
create.element, use_table=True, use_schema=True
),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String()
),
)
def visit_drop_column_comment(self, drop):
return "COMMENT ON COLUMN %s IS NULL" % self.preparer.format_column(
drop.element, use_table=True
)
def visit_create_sequence(self, create):
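# e.g. Sequence("s", start=1, increment=5) emits
# "CREATE SEQUENCE s INCREMENT BY 5 START WITH 1".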
text = "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cache is not None:
text += " CACHE %d" % create.element.cache
if create.element.order is True:
text += " ORDER"
if create.element.cycle is not None:
text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element
)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or "",
)
def get_column_specification(self, column, **kwargs):
colspec = (
self.preparer.format_column(column)
+ " "
+ self.dialect.type_compiler.process(
column.type, type_expression=column
)
)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ""
def post_create_table(self, table):
return ""
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE
)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True
)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(
constraint.sqltext, include_table=False, literal_binds=True
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name)
for c in (
constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns
)
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
", ".join(
preparer.quote(f.parent.name) for f in constraint.elements
),
self.define_constraint_remote_table(
constraint, remote_table, preparer
),
", ".join(
preparer.quote(f.column.name) for f in constraint.elements
),
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
", ".join(self.preparer.quote(c.name) for c in constraint)
)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % self.preparer.validate_sql_phrase(
constraint.ondelete, FK_ON_DELETE
)
if constraint.onupdate is not None:
text += " ON UPDATE %s" % self.preparer.validate_sql_phrase(
constraint.onupdate, FK_ON_UPDATE
)
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % self.preparer.validate_sql_phrase(
constraint.initially, FK_INITIALLY
)
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % {"precision": type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError(
"Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_
)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
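# Illustrative note (editor-added): the visitors above render type objects to
# DDL strings, e.g. Numeric() -> "NUMERIC", Numeric(10) -> "NUMERIC(10)",
# Numeric(10, 2) -> "NUMERIC(10, 2)", and a VARCHAR(30) carrying collation
# "fr_FR" -> 'VARCHAR(30) COLLATE "fr_FR"'.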
class StrSQLTypeCompiler(GenericTypeCompiler):
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
return "%s" % type_.__class__.__name__
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = schema._schema_getter(None)
def __init__(
self,
dialect,
initial_quote='"',
final_quote=None,
escape_quote='"',
quote_case_sensitive_collations=True,
omit_schema=False,
):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
        not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self.quote_case_sensitive_collations = quote_case_sensitive_collations
self._strings = {}
self._double_percents = self.dialect.paramstyle in (
"format",
"pyformat",
)
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
prep.schema_for_object = schema._schema_getter(schema_translate_map)
return prep
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
value = value.replace(self.escape_quote, self.escape_to_quote)
if self._double_percents:
value = value.replace("%", "%%")
return value
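    # e.g. (editor-added) with the default '"' escape_quote, 'he"llo'
    # becomes 'he""llo'; under a "format"/"pyformat" paramstyle, '50%'
    # additionally becomes '50%%'.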
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def validate_sql_phrase(self, element, reg):
"""keyword sequence filter.
a filter for elements that are intended to represent keyword sequences,
such as "INITIALLY", "INITIALLY DEFERRED", etc. no special characters
should be present.
.. versionadded:: 1.3
"""
if element is not None and not reg.match(element):
raise exc.CompileError(
"Unexpected SQL phrase: %r (matching against %r)"
% (element, reg.pattern)
)
return element
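    # e.g. (editor-added) validate_sql_phrase("SET NULL", FK_ON_DELETE)
    # passes the element through unchanged, while a phrase with stray
    # characters such as "CASCADE; --" raises CompileError (assuming
    # FK_ON_DELETE admits the standard ON DELETE keywords).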
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return (
self.initial_quote
+ self._escape_identifier(value)
+ self.final_quote
)
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value)
)
def _requires_quotes_illegal_chars(self, value):
"""Return True if the given identifier requires quoting, but
not taking case convention into account."""
return not self.legal_characters.match(util.text_type(value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema name.
The name is quoted if it is a reserved word, contains quote-necessary
characters, or is an instance of :class:`.quoted_name` which includes
``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
:param schema: string schema name
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote_schema.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote_schema.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
return self.quote(schema)
def quote(self, ident, force=None):
"""Conditionally quote an identfier.
The identifier is quoted if it is a reserved word, contains
quote-necessary characters, or is an instance of
:class:`.quoted_name` which includes ``quote`` set to ``True``.
Subclasses can override this to provide database-dependent
quoting behavior for identifier names.
:param ident: string identifier
:param force: unused
.. deprecated:: 0.9
The :paramref:`.IdentifierPreparer.quote.force`
parameter is deprecated and will be removed in a future
release. This flag has no effect on the behavior of the
:meth:`.IdentifierPreparer.quote` method; please refer to
:class:`.quoted_name`.
"""
if force is not None:
# not using the util.deprecated_params() decorator in this
# case because of the additional function call overhead on this
# very performance-critical spot.
util.warn_deprecated(
"The IdentifierPreparer.quote.force parameter is "
"deprecated and will be removed in a future release. This "
"flag has no effect on the behavior of the "
"IdentifierPreparer.quote method; please refer to "
"quoted_name()."
)
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
def format_collation(self, collation_name):
if self.quote_case_sensitive_collations:
return self.quote(collation_name)
else:
return collation_name
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (
not self.omit_schema
and use_schema
and effective_schema is not None
):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table
)
if name is None:
if isinstance(constraint.name, elements._defer_none_name):
return None
else:
name = constraint.name
else:
name = constraint.name
if isinstance(name, elements._truncated_label):
if constraint.__visit_name__ == "index":
max_ = (
self.dialect.max_index_name_length
or self.dialect.max_identifier_length
)
else:
max_ = self.dialect.max_identifier_length
if len(name) > max_:
name = name[0 : max_ - 8] + "_" + util.md5_hex(name)[-4:]
else:
self.dialect.validate_identifier(name)
return self.quote(name)
def format_index(self, index):
return self.format_constraint(index)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name):
"""Prepare a quoted schema name."""
return self.quote(name)
def format_column(
self,
column,
use_table=False,
name=None,
table_name=None,
use_schema=False,
):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, "is_literal", False):
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ self.quote(name)
)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return (
self.format_table(
column.table, use_schema=use_schema, name=table_name
)
+ "."
+ name
)
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and effective_schema:
return (
self.quote_schema(effective_schema),
self.format_table(table, use_schema=False),
)
else:
return (self.format_table(table, use_schema=False),)
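    # e.g. (editor-added) a table "user" in schema "prod" -> ('prod', 'user');
    # with no schema, or with omit_schema set, -> ('user',)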
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = [
re.escape(s)
for s in (
self.initial_quote,
self.final_quote,
self._escape_identifier(self.final_quote),
)
]
r = re.compile(
r"(?:"
r"(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s"
r"|([^\.]+))(?=\.|$))+"
% {"initial": initial, "final": final, "escaped": escaped_final}
)
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [
self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]
]
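# --- Editor's sketch (not part of the original module) ---
# A minimal, self-contained illustration of the quoting rules implemented by
# IdentifierPreparer.quote() above, assuming the default '"' quote character
# and a toy reserved-word set; the real class consults the dialect's reserved
# words and legal-character pattern instead.
if __name__ == "__main__":
    import re as _re
    class _DemoPreparer(object):
        reserved_words = {"select", "table"}
        legal = _re.compile(r"^[a-z_][a-z0-9_$]*$")
        def quote(self, ident):
            # quote when reserved, not all-lowercase, or containing
            # characters outside the legal set; double embedded quotes
            if (
                ident.lower() in self.reserved_words
                or ident != ident.lower()
                or not self.legal.match(ident)
            ):
                return '"%s"' % ident.replace('"', '""')
            return ident
    assert _DemoPreparer().quote("users") == "users"
    assert _DemoPreparer().quote("select") == '"select"'
    assert _DemoPreparer().quote("MyTable") == '"MyTable"'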
|
wujuguang/sqlalchemy
|
lib/sqlalchemy/sql/compiler.py
|
Python
|
mit
| 126,093
|
import time
import pandas as pd
import spacy
import scattertext as st
nlp = spacy.load('en_core_web_sm', parser=False)
t0 = time.time()
reviews_df = pd.read_csv('https://github.com/JasonKessler/ICLR18ReviewVis/raw/master/iclr2018_reviews.csv.bz2')
reviews_df['parse'] = reviews_df['review'].apply(st.whitespace_nlp_with_sentences)
full_corpus = (st.CorpusFromParsedDocuments(reviews_df,
category_col='category',
parsed_col='parse',
#feats_from_spacy_doc=st.PhraseMachinePhrases()
).build())
term_ranker = st.OncePerDocFrequencyRanker
corpus = (full_corpus
.keep_only_these_categories(['Accept, Positive', 'Accept, Negative',
'Reject, Positive', 'Reject, Negative'],
False)
.get_unigram_corpus()
.select(st.ClassPercentageCompactor(term_count=5)))
print('finding priors', time.time() - t0, 's')
priors = (st.PriorFactory(full_corpus, starting_count=0.01)
.use_all_categories()
.get_priors())
print('building four square', time.time() - t0, 's')
four_square = st.FourSquare(
corpus,
category_a_list=['Accept, Positive'],
not_category_a_list=['Reject, Negative'],
category_b_list=['Accept, Negative'],
not_category_b_list=['Reject, Positive'],
term_ranker=term_ranker,
scorer=st.LogOddsRatioInformativeDirichletPrior(priors, 500, 'word'),
labels={'a': 'Positive Reviews of Accepted Papers',
'b': 'Negative Reviews of Accepted Papers',
'not_a_and_not_b': 'Rejections',
'a_and_b': 'Acceptances',
'a_and_not_b': 'Positive Reviews',
'b_and_not_a': 'Negative Reviews',
'not_a': 'Negative Reviews of Rejected Papers',
'not_b': 'Positive Reviews of Rejected Papers',
}
)
print('making html', time.time() - t0, 's')
html = st.produce_four_square_explorer(four_square=four_square,
x_label='Pos-Neg',
y_label='Accept-Reject',
num_terms_semiotic_square=5,
minimum_term_frequency=0,
pmi_threshold_coefficient=0,
term_ranker=term_ranker,
metadata=(corpus._df['category'] + ': '
+ corpus._df.rating + ', '
+ corpus._df['title']))
fn = 'demo_four_square.html'
open(fn, 'wb').write(html.encode('utf-8'))
print('Open ' + fn + ' in Chrome or Firefox.')
print('done', time.time() - t0, 's')
|
JasonKessler/scattertext
|
demo_four_square.py
|
Python
|
apache-2.0
| 2,829
|
# -*- coding: utf-8 -*-
# Copyright 2016 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
class TestOptOut(common.TransactionCase):
def setUp(self):
super(TestOptOut, self).setUp()
self.company = self.env.ref('base.main_company')
def test_company_default_values(self):
# test loading of default company values
self.assertTrue(self.company.default_opt_out is True)
def test_configuration_default_values(self):
# test loading of default configuration values
config_model = self.env['base.config.settings']
config = config_model.create({})
self.assertTrue(config.default_opt_out is True)
def test_partner_default_values(self):
# test loading of default partner values
new_partner = self.env['res.partner'].create({
'name': 'New Test Partner'
})
self.assertTrue(new_partner.opt_out is True)
|
VitalPet/addons-onestein
|
mail_opt_out_default/test/test_opt_out.py
|
Python
|
agpl-3.0
| 1,004
|
import bpy
import bmesh
import sys,getopt
import os
from math import pi
bpy.ops.object.mode_set(mode='OBJECT')
lista = sys.argv
print("Argument List:"+ str(lista))
print(str(lista[len(sys.argv)-1]))
profZ = float(lista[len(sys.argv)-1])
print("hello, imprimir vetices del poligono en " + str(profZ))
#bpy.context.tool_settings.mesh_select_mode = [True, False, False]
scene = bpy.context.scene
scene.layers = [True] * 20 # Show all layers
G = "8.4"
contador = 0
currfi = bpy.data.filepath +"z"+str(profZ) + ".txt"
outfile = open(currfi,'w')
outfile.write("---- "+bpy.data.filepath+"---\n")
outfile.write("vetices del poligono en " + str(profZ)+" \n")
#obj = scene.objects['Icosphere']
for obj in scene.objects:
if obj.type == 'MESH':
bpy.ops.object.mode_set(mode='OBJECT')
        # Split the mesh at depth profZ
        # 1 add an auxiliary half-space
bpy.ops.mesh.primitive_cube_add(location=(0.0,0.0,profZ-1000))
ob = bpy.context.object
ob.name = 'Borra'
ob.scale=((1000,1000,1000))
        # 2 select the auxiliary half-space
ob.select = True
bpy.context.scene.objects.active = ob
        # 3 create the boolean operation
boo = ob.modifiers.new('Booh', 'BOOLEAN')
boo.object = obj
boo.operation = 'DIFFERENCE'
        # 4.1 Apply the operator
bpy.ops.object.select_all(action="TOGGLE")
ob.select = True
bpy.context.scene.objects.active = ob
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Booh")
        # 4.2 Apply the translation and scale
bpy.ops.object.transform_apply(location=True)
bpy.ops.object.transform_apply(scale=True)
        # 5 Select the created auxiliary plane and collect its vertices
ob.select = True
bpy.context.scene.objects.active = ob
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(ob.data)
for f in bm.verts:
cen = f.co
if abs(cen.z - profZ) < 0.0001:
print(format(cen.x,G)+" "+format(cen.y,G))
outfile.write(format(cen.x,G)+" "+format(cen.y,G)+"\n")
contador = contador + 1
outfile.write("Fueron "+str(contador)+" vertices \n")
outfile.close()
print("There are "+ str(contador) + " points")
#import bpy
#import bmesh
#scene = bpy.context.scene
#obj = scene.objects['Icosphere']
#profZ = 0
#G = "8.4"
#bpy.ops.mesh.primitive_cube_add(location=(0.0,0.0,1000-profZ))
#ob = bpy.context.object
#ob.name = 'Borra'
#ob.scale=((1000,1000,1000))
#boo = obj.modifiers.new('Booh', 'BOOLEAN')
#boo.object = ob
#boo.operation = 'DIFFERENCE'
#bpy.ops.object.select_all(action="TOGGLE")
#obj.select = True
#bpy.context.scene.objects.active = obj
#bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Booh")
#bpy.ops.object.select_all(action="TOGGLE")
#ob.select = True
#bpy.context.scene.objects.active = ob
#bpy.ops.object.delete()
#for f in bm.verts:
# cen = f.co
# if cen.z == profZ:
# if (abs(cen.x) != 1000) and (abs(cen.y) != 1000):
# print(format(cen.x,G)+" "+format(cen.y,G))
# contador = contador + 1
|
marshallcoz/turnt-octo-happiness
|
LayIBEM3d/geometria3d/poligonoZ0/poligonoEnLaSuperficie.py
|
Python
|
mit
| 3,151
|
# coding: utf-8
# In[1]:
import numpy as np
import os
import sys
import multiprocessing
import scipy.sparse
import pickle
import getopt
# In[2]:
def data_to_dist(X):
s = np.sum(X,axis=1)
X = X / s[:,None]
return X, s
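# Worked example (editor-added): each row of X is divided by its row sum,
# converting raw TCC counts into per-cell distributions, e.g.
#   data_to_dist(np.array([[1., 3.], [2., 2.]]))
#   -> (array([[0.25, 0.75], [0.5, 0.5]]), array([4., 4.]))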
# In[3]:
try:
opts, args = getopt.getopt(sys.argv[1:],"i:m:t:d:",["idir=","num-eq-classes=","TCC-file=","TCCD-file="])
except getopt.GetoptError:
print ("getopterrror")
print ('usage is : \n python get_tcc_dist.py -i input_tcc_dir -m number-of-eq-classes -t path-to-output-TCC-file -d path-to-output-TCC-dist-file')
sys.exit(1)
expr_dir=''
num_eq_classes=0
norm_file=''
unorm_file=''
for opt,arg in opts:
if opt in ("-i", "--idir"):
expr_dir=arg
elif opt in ("-m","--num-eq-classes"):
num_eq_classes=int(arg)
elif opt in ("-t","--TCC-file"):
unorm_file=arg
elif opt in ("-d","--TCCD-file"):
norm_file=arg
if (not expr_dir) or (not num_eq_classes) or (not norm_file) or (not unorm_file):
print ('usage is : \n python get_tcc_dist.py -i input_tcc_dir -m number-of-eq-classes -t path-to-output-TCC-file -d path-to-output-TCC-dist-file')
sys.exit(1)
fl_list='file_list_Trapnell.dat'
# In[10]:
eq_dict={}
flnames=sorted([x for x in os.listdir(expr_dir) if x.endswith('.class')])
eq_class_hash=num_eq_classes
for flname in flnames:
with open(expr_dir+flname) as flptr:
for line in flptr:
line = line.strip()
vect = line.split()
if not vect[0].isdigit():
if vect[0] not in eq_dict:
eq_dict[vect[0]]=eq_class_hash
eq_class_hash+=1
TCC_mat=np.zeros((len(flnames),max(eq_dict.values())+1))
for cell_number in range(len(flnames)):
cur_flname=flnames[cell_number]
with open(expr_dir+cur_flname) as flptr1:
for line in flptr1:
line = line.strip()
vect = line.split()
assert len(vect)==2
if vect[0].isdigit():
index = int(vect[0])-1
else:
index = eq_dict[vect[0]]
#print index
value = int(vect[1])
#print value
TCC_mat[cell_number][index] = value
#print (np.shape(TCC_mat))
#print (sum(TCC_mat[0]>0))
TCC_dist, num_mapped_reads =data_to_dist(TCC_mat)
#print (TCC_dist.shape)
S=scipy.sparse.csr_matrix(TCC_dist)
S1=scipy.sparse.csr_matrix(TCC_mat)
with open(norm_file, 'wb') as outfile:
pickle.dump(S, outfile, pickle.HIGHEST_PROTOCOL)
with open(unorm_file, 'wb') as outfile:
pickle.dump(S1, outfile, pickle.HIGHEST_PROTOCOL)
with open(fl_list,'wb') as outfile:
pickle.dump(flnames, outfile, pickle.HIGHEST_PROTOCOL)
# In[ ]:
|
govinda-kamath/clustering_on_transcript_compatibility_counts
|
Trapnell_pipeline/get_tcc_dist.py
|
Python
|
mit
| 2,722
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 24 19:01:10 2014
@author: zhaoyd
"""
|
adamchau/goagent_localupdate
|
backup_ini.py
|
Python
|
mit
| 93
|
#!/usr/bin/env python3
# imports
import logging
from hardware_controller import *
from time import sleep, time
import queue
class Robot:
""" Main class for controlling our robot morTimmy
The brain of the robot is a raspberry Pi and the low level
electronic are handled by an Arduino. The Arduino provides
an interface to the DC motors and various sensors
"""
class State:
""" Set the state of the Robot """
running = "running"
stopped = "stopped"
autonomous = "autonomous"
# Note: only variables belonging to all
# instances of the class belong here. Others
# should be initialised in __init__
MIN_DISTANCE_TO_OBJECT = 10
def __init__(self):
""" Called when the robot class is created.
It intializes the sensor data queue and sets up the
logging output file
Returns:
Raises:
TODO: Add proper error handling.
"""
self.LOG_FILENAME = 'my_morTimmy.log'
logging.basicConfig(filename=self.LOG_FILENAME,
level=logging.DEBUG,
filemode='w',
format='%(asctime)s %(levelname)s %(message)s')
self.state = self.State()
self.currentState = self.state.stopped
self.arduino = HardwareController()
self.runningTime = 0
self.lastSensorReading = 0
logging.info('initialising morTimmy the robot')
self.sensorDataQueue = queue.Queue()
self.initialize()
def initialize(self):
""" (re)initializes the robot.
Responsible for setting up the connection to the Arduino.
The function loops until a connection is established
"""
self.arduino.initialize()
while not self.arduino.isConnected:
print ("Failed to establish connection to Arduino, retrying in 5s")
logging.warning("Failed to establish connection to Arduino, "
"retrying in 5s")
sleep(5) # wait 5sec before trying again
self.arduino.initialize()
logging.info('Connected to Arduino through serial connection')
self.runningTime = 0
def run(self):
""" The main robot loop """
# Check connection to arduino, reinitialize if not
if not self.arduino.isConnected:
self.arduino.initialize()
currentTime = time()
# Turn robot randomly to the left or right when an object is near
if self.arduino.getDistance() <= self.MIN_DISTANCE_TO_OBJECT:
pass
# Move robot forward if stopped for 5sec
if self.currentState == self.state.stopped and (currentTime - self.runningTime) >= 5:
self.arduino.sendMessage(MODULE_MOTOR, CMD_MOTOR_FORWARD, 255)
self.runningTime = currentTime
self.currentState = self.state.running
print("Robot moving forward")
# Stop robot if running for 5sec
elif self.currentState == self.state.running and (currentTime - self.runningTime) >= 5:
self.arduino.sendMessage(MODULE_MOTOR, CMD_MOTOR_STOP)
self.runningTime = currentTime
self.currentState = self.state.stopped
print("Robot stopped")
# Read bytes from the Arduino and add messages to the Queue if found
self.arduino.recvMessage()
# Process all received messages in the queue
while not self.arduino.recvMessageQueue.empty():
recvMessage = self.arduino.recvMessageQueue.get_nowait()
if recvMessage is None:
# Why does the queue always return a None object?
break
elif recvMessage == 'Invalid':
logging.error('Received invalid packet, ignoring')
elif recvMessage['module'] == chr(MODULE_DISTANCE_SENSOR):
self.arduino.setDistance(recvMessage['data'])
else:
logging.warning("Message with unknown module or command received. Message details:")
logging.warning("msgID: %d ackID: %d module: %s "
"commandType: %s data: %d checksum: %s" % (recvMessage['messageID'],
recvMessage['acknowledgeID'],
hex(recvMessage['module']),
hex(recvMessage['commandType']),
recvMessage['data'],
hex(recvMessage['checksum'])))
def main():
""" This is the main function of our script.
It will only contain a very limited program
logic. The main action happens in the Robot class
"""
morTimmy = Robot()
try:
while(True):
morTimmy.run()
except KeyboardInterrupt:
print("Thanks for running me!")
if __name__ == '__main__':
main()
|
thiezn/morTimmy
|
raspberrypi/morTimmy/morTimmy.py
|
Python
|
mit
| 5,132
|
"""
@author: kt12
Kenneth Tsuji
2017-07-17
@editor: adomakor12
Kenneth Tsuji
2017-07-17
"""
import sys, os, logging
from dotenv import load_dotenv
import time
import ssl
import pymongo
import tweepy
import json
# The following are needed by Listener._run below (adapted from tweepy's
# Stream._run); ConnectionError is a builtin on Python 3.
from requests.exceptions import Timeout
from urllib3.exceptions import ProtocolError, ReadTimeoutError
# Set number of tweets to record, sleep between API calls, and subject matter
num_tweets = int(sys.argv[1])
sleep = float(sys.argv[2])
subject = sys.argv[3:]
subject_type = type(subject)
# Append hashtags and de-hashed terms to subject_list
subject_list = []
# Check if input is single string or list of terms or hashtag
if subject_type is str:
# check hashtag
if '#' in subject:
subject_list.extend((subject, subject.replace('#', '')))
else:
subject_list.append(subject)
elif subject_type is list:
# If type is lst and len is 1, must be hashtag
for k in subject:
if '#' in k:
subject_list.extend((k, k.replace('#', '')))
else:
subject_list.append(k)
else:
    raise TypeError('subject must be a string or a list of strings')
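# Worked example (editor-added): subject ['#nba', 'warriors'] yields
# subject_list ['#nba', 'nba', 'warriors'] -- hashtags are tracked both
# with and without the leading '#'.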
# Set up logging
logger = logging.getLogger(__name__)
# Logs at debug level will be recorded
logger.setLevel(logging.DEBUG)
# Include STDOUT in log stream
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
logger.addHandler(console_handler)
# Push formatted logs out to .log file
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_handler = logging.FileHandler('twitter_' + timestamp + '.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Logger initiated.')
# Credentials must be stored in .env file in same directory.
# Load Twitter credentials
load_dotenv('.env')
CONSUMER_KEY = os.environ.get('consumer_key')
CONSUMER_SECRET = os.environ.get('consumer_secret')
ACCESS_TOKEN = os.environ.get('access_token')
ACCESS_TOKEN_SECRET = os.environ.get('access_token_secret')
logger.debug('Twitter credentials loaded.')
print('The topic(s) you chose to track through Twitter: {}.'.format(subject_list))
logger.debug('Topic decided: {}.'.format(subject_list))
# Set MongoDB local host (on same machine)
host = 'mongodb://127.0.0.1:27017'
# Connect to twitterdb
# Will be created if it doesn't exist
client = pymongo.MongoClient(host)
logger.debug('Mongo connected.')
document_db = client.twitterdb
# Convert subject_list to string and underscores
col_name = '_'.join(k for k in subject_list if '#' not in k)
# Check if collection name already exists
if col_name in document_db.collection_names():
logger.debug('Collection {} exists'.format(col_name))
collection = document_db[col_name]
else:
document_db.create_collection(col_name)
logger.debug('New collection {}'.format(col_name))
collection = document_db[col_name]
print("Tweets will be stored in the MongoDB collection: {}".format(col_name))
# Set up tweepy credentials
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
logger.debug('Tweepy has loaded credentials.')
# Create listener based on tweepy Streamlistener
""" If used as streamer, max_limit and count will impact the
number of tweets collected. However, as the connection is
being broken for purpose of refreshing the tweet stream,
they are vestigial"""
class Listener(tweepy.StreamListener):
def __init__(self, api, max_limit=1, timeout=sleep*5):
#super(Listener, self).__init__()
self.api = api
self.count = 0
self.limit = max_limit
self.timeout = timeout
def on_status(self, status):
print('on_status')
logger.debug(status.text)
def on_error(self, status_code):
logger.debug(status_code)
if status_code == 420:
# Returning False disconnects the stream
return False
    def on_timeout(self):  # called when Twitter times out the stream, not a runtime timeout
print('TIME OUT')
return False
def on_data(self, data):
logger.debug('Pulling in tweets')
while self.count < self.limit:
# Insert into collection from above
collection = document_db[col_name]
tweet_json = json.loads(data)
collection.insert_one(tweet_json)
text = tweet_json['text']
user = tweet_json['user']['screen_name']
print(user,":", text)
self.count += 1
else:
"""Called when stream connection times out"""
return False
def _run(self):
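        # NOTE (editor-added): this method appears adapted from
        # tweepy.Stream._run and references Stream attributes (self.host,
        # self.url, self.session, self.body, retry/snooze settings) that
        # this Listener never defines, so as written it is vestigial.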
# Authenticate
url = "https://%s%s" % (self.host, self.url)
# Connect and process the stream
error_counter = 0
resp = None
exception = None
while self.running:
if self.retry_count is not None:
if error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
auth = self.auth.apply_auth()
resp = self.session.request('POST',
url,
data=self.body,
timeout=self.timeout,
stream=True,
auth=auth,
verify=self.verify)
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
if resp.status_code == 420:
self.retry_time = max(self.retry_420_start,
self.retry_time)
                        time.sleep(self.retry_time)  # use time.sleep: the module-level 'sleep' is a float
self.retry_time = min(self.retry_time * 2,
self.retry_time_cap)
else:
error_counter = 0
self.retry_time = self.retry_time_start
self.snooze_time = self.snooze_time_step
self.listener.on_connect()
self._read_loop(resp)
except (Timeout, ssl.SSLError, ConnectionError, ReadTimeoutError, ProtocolError) as exc:
# This is still necessary, as a SSLError can actually be
# thrown when using Requests
# If it's not time out treat it like any other exception
if isinstance(exc, ssl.SSLError):
if not (exc.args and 'timed out' in str(exc.args[0])):
exception = exc
break
if self.listener.on_timeout() is False:
break
if self.running is False:
break
                time.sleep(self.snooze_time)  # time.sleep, not the module-level float 'sleep'
self.snooze_time = min(self.snooze_time + self.snooze_time_step,
self.snooze_time_cap)
except Exception as exc:
exception = exc
# any other exception is fatal, so kill loop
break
print('Storing the following tweets:')
for _ in range(num_tweets):
    streamer = tweepy.Stream(auth=auth, listener=Listener(api=tweepy.API(), timeout=sleep*5), timeout=sleep*5)  # Stream manages its own 'running' flag; filter() sets it
print('I\'M LOOKING')
streamer.filter(track=subject_list)
time.sleep(sleep)
logger.debug('Done pulling in tweets.')
print('Done pulling in tweets.')
|
adomakor412/ENODO_global
|
social_analyzer.py
|
Python
|
mit
| 7,562
|
from __future__ import print_function, division, absolute_import
#
# Copyright (c) 2010 Red Hat, Inc.
#
# Authors: Jeff Ortel <jortel@redhat.com>
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import logging
import yum
from yum.plugins import TYPE_CORE
from rhsm import logutil
from subscription_manager.productid import ProductManager, RpmVersion
from subscription_manager.utils import chroot
from subscription_manager.injectioninit import init_dep_injection
requires_api_version = '2.6'
plugin_type = (TYPE_CORE,)
log = logging.getLogger('rhsm-app.' + __name__)
def postverifytrans_hook(conduit):
"""
Update product ID certificates.
"""
# register rpm name for yum history recording
# yum on 5.7 doesn't have this method, so check for it
if hasattr(conduit, 'registerPackageName'):
conduit.registerPackageName("subscription-manager")
try:
init_dep_injection()
except ImportError as e:
conduit.error(3, str(e))
return
logutil.init_logger_for_yum()
    # If a tool (e.g. Anaconda or Mock) manages a chroot via
# 'yum --installroot', we must update certificates in that directory.
chroot(conduit.getConf().installroot)
try:
pm = YumProductManager(conduit._base)
pm.update_all()
conduit.info(3, 'Installed products updated.')
except Exception as e:
conduit.error(3, str(e))
class YumProductManager(ProductManager):
def __init__(self, base):
self.base = base
ProductManager.__init__(self)
def update_all(self):
return self.update(self.get_enabled(),
self.get_active(),
self.check_version_tracks_repos())
def get_enabled(self):
"""find yum repos that are enabled"""
lst = []
enabled = self.base.repos.listEnabled()
        # skip repos that we don't have productid info for...
for repo in enabled:
try:
fn = repo.retrieveMD(self.PRODUCTID)
cert = self._get_cert(fn)
if cert is None:
continue
lst.append((cert, repo.id))
except yum.Errors.RepoMDError as e:
# We have to look in all repos for productids, not just
# the ones we create, or anaconda doesn't install it.
self.meta_data_errors.append(repo.id)
except Exception as e:
log.warning("Error loading productid metadata for %s." % repo)
log.exception(e)
self.meta_data_errors.append(repo.id)
if self.meta_data_errors:
log.debug("Unable to load productid metadata for repos: %s",
self.meta_data_errors)
return lst
def get_active(self):
"""
        find the list of repos that provide packages that are actually installed
"""
active = set([])
installed_packages = self.base.rpmdb.returnPackages()
for pkg in installed_packages:
try:
# pkg.repoid contains only "installed" string not valid origin
# of repository
repo = pkg.yumdb_info.from_repo
except AttributeError:
# When package is installed from local RPM and not from repository
# then yumdb_info doesn't have from_source attribute in some case
log.debug('Unable to get repo for package: %s' % pkg.name)
else:
# When repo name begins with '/', then it means that RPM was installed
# from local .rpm file. Thus productid certificate cannot exist for such
# origin of RPM
if repo[0] == '/':
log.debug('Not adding local source of RPM: %s to set of active repos' % repo)
continue
active.add(repo)
return active
@staticmethod
def check_version_tracks_repos():
major, minor, micro = yum.__version_info__
yum_version = RpmVersion(version="%s.%s.%s" % (major, minor, micro))
needed_version = RpmVersion(version="3.2.28")
if yum_version >= needed_version:
return True
return False
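# Editor's note (illustrative): check_version_tracks_repos() compares the
# running yum against the 3.2.28 cutoff above, so e.g. a yum reporting
# (3, 4, 3) yields True while (3, 2, 27) yields False, and update() is told
# whether per-package repo tracking is available.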
|
Lorquas/subscription-manager
|
src/plugins/product-id.py
|
Python
|
gpl-2.0
| 4,795
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
import os
import os.path
import time
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from binascii import unhexlify
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import NetworkThread
from test_framework.nodemessages import *
from test_framework.bumessages import *
from test_framework.bunode import BasicBUCashNode, VersionlessProtoHandler
class PingEarlyTest(BitcoinTestFramework):
def __init__(self):
self.nodes = []
BitcoinTestFramework.__init__(self)
def setup_chain(self):
pass
def setup_network(self, split=False):
pass
def restart_node(self, send_initial_version = True):
# remove any potential banlist
banlist_fn = os.path.join(
node_regtest_dir(self.options.tmpdir, 0),
"banlist.dat")
print("Banlist file name:", banlist_fn)
try:
os.remove(banlist_fn)
print("Removed old banlist %s.")
        except OSError:
pass
stop_nodes(self.nodes)
wait_bitcoinds()
print("Initializing test directory " + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
self.nodes = [ start_node(0, self.options.tmpdir, ["-debug"]) ]
self.pynode = pynode = BasicBUCashNode()
pynode.connect(0, '127.0.0.1', p2p_port(0), self.nodes[0],
protohandler = VersionlessProtoHandler(),
send_initial_version = send_initial_version)
return pynode.cnxns[0]
def run_test(self):
logging.info("Testing early ping replies")
conn = self.restart_node(send_initial_version = False)
conn.send_message(msg_ping(), pushbuf=True)
nt = NetworkThread()
nt.start()
conn.wait_for(lambda : conn.pong_counter)
conn.connection.disconnect_node()
nt.join()
if __name__ == '__main__':
xvt = PingEarlyTest()
xvt.main()
|
BitcoinUnlimited/BitcoinUnlimited
|
qa/rpc-tests/pingearly.py
|
Python
|
mit
| 2,275
|
#!/usr/bin/env python
'''
Example module
Take a look at the test module to check how events are handled.
'''
about = "About message goes here."
def main(server, initOnChannel, usrManager):
    # This gets called when the module is initialized.
    # initOnChannel is the channel this module was loaded on, or nothing
    # if it was loaded at startup.
    pass
def destroy(server):
    # This gets called when the module gets unloaded.
    pass
def cmd(server, word, word_eol, usrManager):
    # This is called when a command is received (|cmd or |whatever)
    pass
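# Editor's sketch (hypothetical, not part of MDSBot): a module built on the
# hooks above might look like the following. The server.sendMsg call and the
# shape of word/word_eol are assumptions, not confirmed by this stub; see
# the test module mentioned in the docstring for the real event handling.
#
#   about = "Echoes back whatever follows the |echo command."
#
#   def cmd(server, word, word_eol, usrManager):
#       if word and word[0] == '|echo' and len(word_eol) > 1:
#           server.sendMsg(word_eol[1])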
|
dom96/MDSBot
|
modules/base.py
|
Python
|
gpl-2.0
| 539
|
import clq
import clq.extensions
import clq.backends.opencl as ocl
import clq.extensions.language_types as lang #the regex types extension.
OpenCL = ocl.Backend()
#TEST: Memoizing Language based on regular expression equivalence
L1 = lang.ConstrainedString(OpenCL, "(.?)+")
L2 = lang.ConstrainedString(OpenCL, ".*")
assert L1 == L2
assert L1.is_subtype(L2)
assert L2.is_subtype(L1)
L3 = lang.ConstrainedString(OpenCL, ".?")
assert L1 != L3
subtype_of_l1 = lang.ConstrainedString(OpenCL, ".+")
assert subtype_of_l1 != L1
#TODO test to make sure memoizing is backend-specific.
#TEST: Reflection and grammar inclusion
L1 = lang.ConstrainedString(OpenCL,".")
L2 = lang.ConstrainedString(OpenCL,".+")
assert L1 != L2
assert L1.is_subtype(L1)
assert L2.is_subtype(L2)
assert L2.is_subtype(L1)
@clq.fn
def test_return(a):
return a
test_return = test_return.compile(OpenCL, L1)
assert test_return.return_type == L1
#TEST: Function returning a Language
@clq.fn
def test(a):
return a
test1 = test.compile(OpenCL, L1)
assert test1.return_type == L1
L3 = lang.ConstrainedString(OpenCL, L1._regex)
test2 = test.compile(OpenCL, L3)
assert test2.return_type == L3
#TEST: Language factor interning. This fails b/c interning isn't working yet.
@clq.fn
def test2(a):
return a
test2 = test2.compile(OpenCL, lang.ConstrainedString(OpenCL, L1._regex))
assert test2.return_type == L1
# Note: rhs + lhs for Language types has type
# Language<(rhs._regex)(lhs._regex)> and requires rhs <: lhs
# The subtyping requirement is somewhat arbitrary.
#TEST: Return type of concatenation
sub = lang.ConstrainedString(OpenCL, ".")
super = lang.ConstrainedString(OpenCL, ".+")
@clq.fn
def test_concatenation(a,b):
return a + b
test_concatenation = test_concatenation.compile(OpenCL, super, sub)
assert test_concatenation.return_type == lang.ConstrainedString(OpenCL,"..+")
#TEST: Subtyping
super_type = lang.ConstrainedString(OpenCL, "a+")
sub_type = lang.ConstrainedString(OpenCL, "a")
@clq.fn
def return_sub(x):
return x
return_sub = return_sub.compile(OpenCL, sub_type)
assert return_sub.return_type == sub_type
@clq.fn
def assign_to_sub(x,y):
x = y
return x
assign_to_sub = assign_to_sub.compile(OpenCL, super_type, sub_type)
assert assign_to_sub.return_type == super_type
@clq.fn
def return_super(a, b, return_sub):
return return_sub(b) + a
return_super = return_super.compile(OpenCL,
super_type,
sub_type,
return_sub.cl_type)
assert return_super.return_type == lang.ConstrainedString(OpenCL, "aa+")
# The example below fails because the lhs must be a subtype of the rhs.
@clq.fn
def fail_check(a, return_sub):
return return_sub(a) + a
try:
fail_check = fail_check.compile(OpenCL, super_type, return_sub.cl_type)
fail_check.return_type #force resolution
assert False
except clq.TypeResolutionError:
assert True
# TEST: Casting
@clq.fn
def upcast(a,b):
return cast(a,b)
upcast = upcast.compile(OpenCL, sub_type, super_type)
assert upcast.return_type == super_type
#@clq.fn
#def downcast(a,b):
# return cast(a,b)
#downcast = downcast.compile(OpenCL, super_type, sub_type)
#assert downcast.return_type == sub_type
@clq.fn
def impossiblecast(a,b):
return cast(a,b)
try:
impossiblecast = impossiblecast.compile(OpenCL, super_type, ocl.int)
    print(impossiblecast.program_item.code)
assert False
except clq.CodeGenerationError as e:
assert True #should fail b/c ocl.int doesn't support runtime cast checks.
#this is always a downcast, so there should always be a check.
#@clq.fn
#def topcast(a):
# return cast("some user input",a)
#topcast = topcast.compile(OpenCL, super_type)
#assert topcast.return_type == super_type
#print topcast.program_item.code
#This is always an upcast, so there should never be a check.
@clq.fn
def bottomcast(a):
return cast(a, "string") #how to use a type variable here?
bottomcast = bottomcast.compile(OpenCL, super_type)
assert bottomcast.return_type == ocl.string
# FAIL; something's wrong with resolve_Assign.
#@clq.fn
#def assign(a,b):
# a = b
# return a
#try:
# assign_if = assign.compile(OpenCL, super_type, return_sub.cl_type)
# #should result in an error; assigning a supertype to a subtype.
# assign_if.return_type
# assert False
# #assert ocl.float.is_subtype(ocl.int)
#except clq.TypeResolutionError:
# assert True
|
cyrus-/ace
|
tests/subtyping_hello.py
|
Python
|
lgpl-3.0
| 4,474
|
"""The tests for the State vacuum Mqtt platform."""
from copy import deepcopy
import json
from unittest.mock import patch
import pytest
from homeassistant.components import vacuum
from homeassistant.components.mqtt import CONF_COMMAND_TOPIC, CONF_STATE_TOPIC
from homeassistant.components.mqtt.vacuum import CONF_SCHEMA, schema_state as mqttvacuum
from homeassistant.components.mqtt.vacuum.const import MQTT_VACUUM_ATTRIBUTES_BLOCKED
from homeassistant.components.mqtt.vacuum.schema import services_to_strings
from homeassistant.components.mqtt.vacuum.schema_state import SERVICE_TO_STRING
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_BATTERY_LEVEL,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
)
from homeassistant.const import (
CONF_NAME,
CONF_PLATFORM,
ENTITY_MATCH_ALL,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.vacuum import common
COMMAND_TOPIC = "vacuum/command"
SEND_COMMAND_TOPIC = "vacuum/send_command"
STATE_TOPIC = "vacuum/state"
DEFAULT_CONFIG = {
CONF_PLATFORM: "mqtt",
CONF_SCHEMA: "state",
CONF_NAME: "mqtttest",
CONF_COMMAND_TOPIC: COMMAND_TOPIC,
mqttvacuum.CONF_SEND_COMMAND_TOPIC: SEND_COMMAND_TOPIC,
CONF_STATE_TOPIC: STATE_TOPIC,
mqttvacuum.CONF_SET_FAN_SPEED_TOPIC: "vacuum/set_fan_speed",
mqttvacuum.CONF_FAN_SPEED_LIST: ["min", "medium", "high", "max"],
}
DEFAULT_CONFIG_2 = {
vacuum.DOMAIN: {"platform": "mqtt", "schema": "state", "name": "test"}
}
async def test_default_supported_features(hass, mqtt_mock):
"""Test that the correct supported features."""
assert await async_setup_component(
hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG}
)
await hass.async_block_till_done()
entity = hass.states.get("vacuum.mqtttest")
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(
["start", "stop", "return_home", "battery", "status", "clean_spot"]
)
async def test_all_commands(hass, mqtt_mock):
"""Test simple commands send to the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "start", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "stop", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "pause", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "locate", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "clean_spot", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "return_to_base", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/set_fan_speed", "medium", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/send_command", "44 FE 93", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
async def test_commands_without_supported_features(hass, mqtt_mock):
"""Test commands which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["status"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
mqtt_mock.async_publish.assert_not_called()
async def test_status(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
message = """{
"battery_level": 61,
"state": "docked",
"fan_speed": "min"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
assert state.attributes.get(ATTR_FAN_SPEED) == "min"
assert state.attributes.get(ATTR_FAN_SPEED_LIST) == ["min", "medium", "high", "max"]
async def test_no_fan_vacuum(hass, mqtt_mock):
"""Test status updates from the vacuum when fan is not supported."""
config = deepcopy(DEFAULT_CONFIG)
del config[mqttvacuum.CONF_FAN_SPEED_LIST]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 61,
"state": "docked"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
@pytest.mark.no_fail_on_log_exception
async def test_status_invalid_json(hass, mqtt_mock):
"""Test to make sure nothing breaks if the vacuum sends bad JSON."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "vacuum/state", '{"asdfasas false}')
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNKNOWN
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2, MQTT_VACUUM_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one vacuum per unique_id."""
config = {
vacuum.DOMAIN: [
{
"platform": "mqtt",
"schema": "state",
"name": "Test 1",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"schema": "state",
"name": "Test 2",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, vacuum.DOMAIN, config)
async def test_discovery_removal_vacuum(hass, mqtt_mock, caplog):
"""Test removal of discovered vacuum."""
data = '{ "schema": "state", "name": "test", "command_topic": "test_topic"}'
await help_test_discovery_removal(hass, mqtt_mock, caplog, vacuum.DOMAIN, data)
async def test_discovery_update_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
config1 = {"schema": "state", "name": "Beer", "command_topic": "test_topic"}
config2 = {"schema": "state", "name": "Milk", "command_topic": "test_topic"}
await help_test_discovery_update(
hass, mqtt_mock, caplog, vacuum.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
data1 = '{ "schema": "state", "name": "Beer", "command_topic": "test_topic"}'
with patch(
"homeassistant.components.mqtt.vacuum.schema_state.MqttStateVacuum.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "schema": "state", "name": "Beer", "command_topic": "test_topic#"}'
data2 = '{ "schema": "state", "name": "Milk", "command_topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2, payload="{}"
)
@pytest.mark.parametrize(
"service,topic,parameters,payload,template",
[
(
vacuum.SERVICE_START,
"command_topic",
None,
"start",
None,
),
(
vacuum.SERVICE_CLEAN_SPOT,
"command_topic",
None,
"clean_spot",
None,
),
(
vacuum.SERVICE_SET_FAN_SPEED,
"set_fan_speed_topic",
{"fan_speed": "medium"},
"medium",
None,
),
(
vacuum.SERVICE_SEND_COMMAND,
"send_command_topic",
{"command": "custom command"},
"custom command",
None,
),
(
vacuum.SERVICE_STOP,
"command_topic",
None,
"stop",
None,
),
],
)
async def test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
service,
topic,
parameters,
payload,
template,
):
"""Test publishing MQTT payload with different encoding."""
domain = vacuum.DOMAIN
config = deepcopy(DEFAULT_CONFIG)
config["supported_features"] = [
"battery",
"clean_spot",
"fan_speed",
"locate",
"pause",
"return_home",
"send_command",
"start",
"status",
"stop",
]
await help_test_publishing_with_custom_encoding(
hass,
mqtt_mock,
caplog,
domain,
config,
service,
topic,
parameters,
payload,
template,
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = vacuum.DOMAIN
config = DEFAULT_CONFIG
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
@pytest.mark.parametrize(
"topic,value,attribute,attribute_value",
[
(
"state_topic",
'{"battery_level": 61, "state": "docked", "fan_speed": "off"}',
None,
"docked",
),
(
"state_topic",
'{"battery_level": 61, "state": "cleaning", "fan_speed": "medium"}',
None,
"cleaning",
),
],
)
async def test_encoding_subscribable_topics(
hass, mqtt_mock, caplog, topic, value, attribute, attribute_value
):
"""Test handling of incoming encoded payload."""
await help_test_encoding_subscribable_topics(
hass,
mqtt_mock,
caplog,
vacuum.DOMAIN,
DEFAULT_CONFIG,
topic,
value,
attribute,
attribute_value,
skip_raw_test=True,
)
|
mezz64/home-assistant
|
tests/components/mqtt/test_state_vacuum.py
|
Python
|
apache-2.0
| 21,078
|
from . import rbtree
import warnings
warnings.warn("This API is completely experimental and likely to change very soon. Use with caution.", FutureWarning)
try:
from autogenerated_path import getDrakePath
except ImportError:
from insource_path import getDrakePath
|
hanssusilo/drake
|
drake/bindings/python/pydrake/__init__.py
|
Python
|
bsd-3-clause
| 272
|
'''
Created on 7 jun. 2021
@author: Gregorio Corral
'''
from rest_framework import serializers
from colony.models import Client, Space, NetAddress
class NetAddressSerializer(serializers.ModelSerializer):
class Meta:
model = NetAddress
fields = '__all__'
class ClientSerializer(serializers.ModelSerializer):
addresses = NetAddressSerializer(read_only=True, many=True)
class Meta:
model = Client
fields = '__all__'
class SpaceSerializer(serializers.ModelSerializer):
clients = ClientSerializer(read_only=True, many=True)
class Meta:
model = Space
fields = '__all__'
|
gcorral/hivequeen
|
HiveQueen/rest_hq/serializers.py
|
Python
|
gpl-2.0
| 680
|
'Tokenize input text for the Recognizer.'
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import sys
import re
try:
import readline
except ImportError:
pass
import input_model
def prepare(separator, prompt='', in_stream=sys.stdin, out_stream=sys.stdout):
"""Read a string from the input string and return it tokenized.
Andrew Plotkin fixed this so that up arrow fetches the previous command."""
if (hasattr(in_stream, 'isatty') and in_stream.isatty()):
input_string = raw_input(prompt)
else:
out_stream.write(prompt)
input_string = in_stream.readline()
if input_string == '':
# Empty string indicates end of the input file.
# (A blank input line would look like '\n'.)
raise EOFError()
out_stream.write(input_string)
return tokenize(input_string, separator)
def tokenize(input_string, separator):
'Returns tokenized and slightly reformatted text.'
input_string = re.sub('\s*$', '', input_string)
new_text = input_string
new_text = re.sub(' *([\.\?\!\&\(\)\-\;\:\,]) *', r' \1 ', new_text)
new_text = re.sub('^[ \t]+', '', new_text)
new_text = re.sub('[ \t\n]+$', '', new_text)
new_text = re.sub('[ \t]+', ' ', new_text)
new_text = re.sub(" *' *", " '", new_text)
tokens = new_text.lower().split()
while len(tokens) > 0 and tokens[0] in separator:
tokens.pop(0)
while len(tokens) > 0 and tokens[-1] in separator:
tokens.pop()
user_input = input_model.RichInput(input_string, tokens)
return user_input
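# A quick illustrative check of tokenize() (the input sentence is made up):
# tokenize('Take the lamp, then go north.', [',']).tokens
# gives ['take', 'the', 'lamp', ',', 'then', 'go', 'north', '.']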
if __name__ == "__main__":
TEST_INPUT = prepare()
print TEST_INPUT.tokens
|
cjb/curveship
|
preparer.py
|
Python
|
isc
| 1,809
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('users', '0014_auto_20151005_1357'),
]
operations = [
migrations.AddField(
model_name='user',
name='max_private_projects',
field=models.IntegerField(null=True, verbose_name='max number of owned private projects', default=settings.MAX_PRIVATE_PROJECTS_PER_USER, blank=True),
),
migrations.AddField(
model_name='user',
name='max_public_projects',
field=models.IntegerField(null=True, verbose_name='max number of owned public projects', default=settings.MAX_PUBLIC_PROJECTS_PER_USER, blank=True),
),
]
|
xdevelsistemas/taiga-back-community
|
taiga/users/migrations/0015_auto_20160120_1409.py
|
Python
|
agpl-3.0
| 827
|
# Copyright 2019 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hparams_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from classifaedes import hparams_lib
import tensorflow.compat.v1 as tf
class HparamsLibTest(tf.test.TestCase):
def testIndentedSerialize(self):
"""Tests that our slightly customized serialization can be parsed.
hparams_lib._human_serialize() uses indented JSON to improve readability.
"""
hps1 = hparams_lib.defaults()
serialized = hparams_lib._human_serialize(hps1)
hps2 = hparams_lib.defaults()
hps2.parse_json(serialized)
self.assertDictEqual(hps1.values(), hps2.values())
if __name__ == '__main__':
tf.test.main()
|
verilylifesciences/classifaedes
|
classifaedes/hparams_lib_test.py
|
Python
|
apache-2.0
| 1,292
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 21:23:43 2013
@author: Yigong
"""
import sqlite3
import pandas as pd
import numpy as np
connection = sqlite3.connect('airport.db')
cursor = connection.cursor()
sql_cmd = '''CREATE TABLE IF NOT EXISTS top50(id INTEGER PRIMARY KEY AUTOINCREMENT, City TEXT, FAA TEXT, IATA TEXT, ICAO TEXT, Airport TEXT, Role TEXT, Enplanements INTEGER) '''
cursor.execute(sql_cmd)
data_df = pd.read_csv('top_airports.csv')
for i in data_df.index:
temp_row = tuple(data_df.iloc[i,:].values)
#info_temp = 'City, FAA, IATA, ICAO, Airport, Role, Enplanements'
print temp_row, '@@'
#airport = tuple(airport)
sql_cmd = ("INSERT INTO top50 (City, FAA, IATA, ICAO, Airport, Role, Enplanements) VALUES"+str(temp_row))
print sql_cmd
cursor.execute(sql_cmd)
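    #a parameterized INSERT (illustrative alternative) avoids manual quoting:
    #cursor.execute("INSERT INTO top50 (City, FAA, IATA, ICAO, Airport, Role, Enplanements) VALUES (?, ?, ?, ?, ?, ?, ?)", temp_row)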
sql_cmd = 'SELECT * FROM top50'
cursor.execute(sql_cmd)
db_info = cursor.fetchall()
print db_info
connection.commit()
connection.close()
#sql_cmd = "SELECT * FROM top50"
#cursor.execute(sql_cmd)
#top50_info = cursor.fetchall()
#for entry in top50_info:
# print entry
|
yigong/AY250
|
hw6/hw_6_data/top50.py
|
Python
|
mit
| 1,090
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Court',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=400)),
('courttype', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('clientname', models.CharField(max_length=100)),
('mobile', models.IntegerField()),
('case_describe', models.TextField(max_length=2000)),
('status', models.CharField(max_length=1, choices=[(b'a', b'Approved'), (b's', b'Submitted'), (b'r', b'Rejected'), (b'p', b'Progress')])),
('hearing_date', models.DateTimeField(null=True, blank=True)),
('court', models.ForeignKey(to='advocates.Court')),
],
),
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('casetype', models.CharField(max_length=50)),
('description', models.TextField(max_length=1000)),
],
),
migrations.AddField(
model_name='session',
name='track',
field=models.ForeignKey(to='advocates.Track'),
),
]
|
khansrk/khanadvocates
|
advocates/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 1,766
|
#!/usr/bin/env python
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_head (gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_blks_import(self):
# make sure that this somewhat magic import works
from gnuradio import blks
def test_gru_import(self):
# make sure that this somewhat magic import works
from gnuradio import gru
if __name__ == '__main__':
gr_unittest.main ()
|
trnewman/VT-USRP-daughterboard-drivers_python
|
gnuradio-core/src/python/gnuradio/gr/qa_kludged_imports.py
|
Python
|
gpl-3.0
| 1,262
|
#!/usr/bin/env python
#
# Jetduino Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)
# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
xPin = ARD_A0
yPin = ARD_A2
jetduino.pinMode(xPin, INPUT_PIN)
jetduino.pinMode(yPin, INPUT_PIN)
# The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023
# The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis
# My joystick produces slightly different results from the specifications found at the URL above
# I've listed both here:
# Specifications
# Min Typ Max Click
# X 206 516 798 1023
# Y 203 507 797
# My Joystick
# Min Typ Max Click
# X 253 513 766 1020-1023
# Y 250 505 769
while True:
try:
# Get X/Y coordinates
x = jetduino.analogRead(xPin)
y = jetduino.analogRead(yPin)
# Calculate X/Y resistance
        Rx = float(1023 - x) * 10 / x
        Ry = float(1023 - y) * 10 / y
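        # Illustrative: a centered reading of x = 513 gives
        # Rx = float(1023 - 513) * 10 / 513, roughly 9.9 (kilo-ohms)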
# Was a click detected on the X axis?
click = 1 if x >= 1020 else 0
print ("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
time.sleep(.5)
except IOError:
print ("Error")
|
NeuroRoboticTech/Jetduino
|
Software/Python/grove_thumb_joystick.py
|
Python
|
mit
| 3,602
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# -*- python -*-
import sys, os, getpass, pprint, re, cPickle, random, shutil, time, errno
import hodlib.Common.logger
from hodlib.ServiceRegistry.serviceRegistry import svcrgy
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.util import to_http_url, get_exception_string
from hodlib.Common.util import get_exception_error_string
from hodlib.Common.util import hodInterrupt, HodInterruptException
from hodlib.Common.util import HOD_INTERRUPTED_CODE
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Hod.hadoop import hadoopCluster, hadoopScript
CLUSTER_DATA_FILE = 'clusters'
INVALID_STATE_FILE_MSGS = \
[
"Requested operation cannot be performed. Cannot read %s: " + \
"Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot write to %s: Permission denied.",
"Requested operation cannot be performed. " + \
"Cannot read/write to %s: Permission denied.",
"Cannot update %s: Permission denied. " + \
"Cluster is deallocated, but info and list " + \
"operations might show incorrect information.",
]
class hodState:
def __init__(self, store):
self.__store = store
self.__stateFile = None
self.__init_store()
self.__STORE_EXT = ".state"
def __init_store(self):
if not os.path.exists(self.__store):
os.mkdir(self.__store)
def __set_state_file(self, id=None):
if id:
self.__stateFile = os.path.join(self.__store, "%s%s" % (id,
self.__STORE_EXT))
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
self.__stateFile = os.path.join(self.__store, item)
def get_state_file(self):
return self.__stateFile
def checkStateFile(self, id=None, modes=(os.R_OK,)):
# is state file exists/readable/writable/both?
self.__set_state_file(id)
# return true if file doesn't exist, because HOD CAN create
# state file and so WILL have permissions to read and/or write
try:
os.stat(self.__stateFile)
except OSError, err:
if err.errno == errno.ENOENT: # error 2 (no such file)
return True
# file exists
ret = True
for mode in modes:
ret = ret and os.access(self.__stateFile, mode)
return ret
def read(self, id=None):
info = {}
self.__set_state_file(id)
if self.__stateFile:
if os.path.isfile(self.__stateFile):
stateFile = open(self.__stateFile, 'r')
try:
info = cPickle.load(stateFile)
except EOFError:
pass
stateFile.close()
return info
def write(self, id, info):
self.__set_state_file(id)
if not os.path.exists(self.__stateFile):
self.clear(id)
stateFile = open(self.__stateFile, 'w')
cPickle.dump(info, stateFile)
stateFile.close()
def clear(self, id=None):
self.__set_state_file(id)
if self.__stateFile and os.path.exists(self.__stateFile):
os.remove(self.__stateFile)
else:
for item in os.listdir(self.__store):
if item.endswith(self.__STORE_EXT):
          os.remove(os.path.join(self.__store, item))
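# Illustrative use of hodState (the store path and id are made up):
#   state = hodState('/tmp/hod-state')
#   state.write('cluster1', {'jobid': 42})
#   state.read('cluster1')   # -> {'jobid': 42}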
class hodRunner:
def __init__(self, cfg, log=None, cluster=None):
self.__hodhelp = hodHelp()
self.__ops = self.__hodhelp.ops
self.__cfg = cfg
self.__npd = self.__cfg['nodepooldesc']
self.__opCode = 0
self.__user = getpass.getuser()
self.__registry = None
self.__baseLogger = None
# Allowing to pass in log object to help testing - a stub can be passed in
if log is None:
self.__setup_logger()
else:
self.__log = log
self.__userState = hodState(self.__cfg['hod']['user_state'])
self.__clusterState = None
self.__clusterStateInfo = { 'env' : None, 'hdfs' : None, 'mapred' : None }
    # Allowing to pass in log object to help testing - a stub can be passed in
if cluster is None:
self.__cluster = hadoopCluster(self.__cfg, self.__log)
else:
self.__cluster = cluster
def __setup_logger(self):
self.__baseLogger = hodlib.Common.logger.hodLog('hod')
self.__log = self.__baseLogger.add_logger(self.__user )
if self.__cfg['hod']['stream']:
self.__baseLogger.add_stream(level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
if self.__cfg['hod'].has_key('syslog-address'):
self.__baseLogger.add_syslog(self.__cfg['hod']['syslog-address'],
level=self.__cfg['hod']['debug'],
addToLoggerNames=(self.__user ,))
def get_logger(self):
return self.__log
def __setup_cluster_logger(self, directory):
self.__baseLogger.add_file(logDirectory=directory, level=4,
backupCount=self.__cfg['hod']['log-rollover-count'],
addToLoggerNames=(self.__user ,))
def __setup_cluster_state(self, directory):
self.__clusterState = hodState(directory)
def __norm_cluster_dir(self, directory):
directory = os.path.expanduser(directory)
if not os.path.isabs(directory):
directory = os.path.join(self.__cfg['hod']['original-dir'], directory)
directory = os.path.abspath(directory)
return directory
def __setup_service_registry(self):
cfg = self.__cfg['hod'].copy()
cfg['debug'] = 0
self.__registry = svcrgy(cfg, self.__log)
self.__registry.start()
self.__log.debug(self.__registry.getXMLRPCAddr())
self.__cfg['hod']['xrs-address'] = self.__registry.getXMLRPCAddr()
self.__cfg['ringmaster']['svcrgy-addr'] = self.__cfg['hod']['xrs-address']
def __set_cluster_state_info(self, env, hdfs, mapred, ring, jobid, min, max):
self.__clusterStateInfo['env'] = env
self.__clusterStateInfo['hdfs'] = "http://%s" % hdfs
self.__clusterStateInfo['mapred'] = "http://%s" % mapred
self.__clusterStateInfo['ring'] = ring
self.__clusterStateInfo['jobid'] = jobid
self.__clusterStateInfo['min'] = min
self.__clusterStateInfo['max'] = max
def __set_user_state_info(self, info):
userState = self.__userState.read(CLUSTER_DATA_FILE)
for key in info.keys():
userState[key] = info[key]
self.__userState.write(CLUSTER_DATA_FILE, userState)
def __remove_cluster(self, clusterDir):
clusterInfo = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterInfo:
del(clusterInfo[clusterDir])
self.__userState.write(CLUSTER_DATA_FILE, clusterInfo)
def __cleanup(self):
if self.__registry: self.__registry.stop()
def __check_operation(self, operation):
opList = operation.split()
if not opList[0] in self.__ops:
self.__log.critical("Invalid hod operation specified: %s" % operation)
self._op_help(None)
self.__opCode = 2
return opList
def __adjustMasterFailureCountConfig(self, nodeCount):
# This method adjusts the ringmaster.max-master-failures variable
# to a value that is bounded by the a function of the number of
# nodes.
maxFailures = self.__cfg['ringmaster']['max-master-failures']
# Count number of masters required - depends on which services
# are external
masters = 0
if not self.__cfg['gridservice-hdfs']['external']:
masters += 1
if not self.__cfg['gridservice-mapred']['external']:
masters += 1
    # So, if there are n nodes and m masters, we wait at least for
    # all masters to come up. Therefore, at least m nodes should be
    # good, which means a maximum of n-m master nodes can fail.
maxFailedNodes = nodeCount - masters
# The configured max number of failures is now bounded by this
# number.
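    # Illustrative: with nodeCount=5 and both hdfs and mapred internal,
    # masters=2, so a configured max-master-failures of 10 is clamped to 3.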
self.__cfg['ringmaster']['max-master-failures'] = \
min(maxFailures, maxFailedNodes)
def _op_allocate(self, args):
operation = "allocate"
argLength = len(args)
min = 0
max = 0
errorFlag = False
errorMsgs = []
if argLength == 3:
nodes = args[2]
clusterDir = self.__norm_cluster_dir(args[1])
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" \
% (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(nodes) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.__opCode = 3
return
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, \
(os.R_OK, os.W_OK)):
self.__log.critical(INVALID_STATE_FILE_MSGS[2] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
# Check if the job is not running. Only then can we safely
# allocate another cluster. Otherwise the user would need
# to deallocate and free up resources himself.
if clusterInfo.has_key('jobid') and \
self.__cluster.is_cluster_deallocated(clusterInfo['jobid']):
self.__log.warn("Found a dead cluster at cluster directory '%s'. Deallocating it to allocate a new one." % (clusterDir))
self.__remove_cluster(clusterDir)
self.__clusterState.clear()
else:
self.__log.critical("Found a previously allocated cluster at cluster directory '%s'. HOD cannot determine if this cluster can be automatically deallocated. Deallocate the cluster if it is unused." % (clusterDir))
self.__opCode = 12
return
self.__setup_cluster_logger(clusterDir)
(status, message) = self.__cluster.is_valid_account()
      if status != 0:
if message:
for line in message:
self.__log.critical("verify-account output: %s" % line)
self.__log.critical("Cluster cannot be allocated because account verification failed. " \
+ "verify-account returned exit code: %s." % status)
self.__opCode = 4
return
else:
self.__log.debug("verify-account returned zero exit code.")
if message:
self.__log.debug("verify-account output: %s" % message)
if re.match('\d+-\d+', nodes):
(min, max) = nodes.split("-")
min = int(min)
max = int(max)
else:
try:
nodes = int(nodes)
min = nodes
max = nodes
except ValueError:
print self.__hodhelp.help(operation)
self.__log.critical(
"%s operation requires a pos_int value for n(nodecount)." %
operation)
self.__opCode = 3
else:
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
self.__opCode = self.__cluster.check_cluster(clusterInfo)
if self.__opCode == 0 or self.__opCode == 15:
self.__setup_service_registry()
if hodInterrupt.isSet():
self.__cleanup()
raise HodInterruptException()
self.__log.debug("Service Registry started.")
self.__adjustMasterFailureCountConfig(nodes)
try:
allocateStatus = self.__cluster.allocate(clusterDir, min, max)
except HodInterruptException, h:
self.__cleanup()
raise h
# Allocation has gone through.
# Don't care about interrupts any more
try:
if allocateStatus == 0:
self.__set_cluster_state_info(os.environ,
self.__cluster.hdfsInfo,
self.__cluster.mapredInfo,
self.__cluster.ringmasterXRS,
self.__cluster.jobId,
min, max)
self.__setup_cluster_state(clusterDir)
self.__clusterState.write(self.__cluster.jobId,
self.__clusterStateInfo)
# Do we need to check for interrupts here ??
self.__set_user_state_info(
{ clusterDir : self.__cluster.jobId, } )
self.__opCode = allocateStatus
except Exception, e:
# Some unknown problem.
self.__cleanup()
self.__cluster.deallocate(clusterDir, self.__clusterStateInfo)
self.__opCode = 1
raise Exception(e)
elif self.__opCode == 12:
self.__log.critical("Cluster %s already allocated." % clusterDir)
elif self.__opCode == 10:
self.__log.critical("dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 13:
self.__log.warn("hdfs dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
elif self.__opCode == 14:
self.__log.warn("mapred dead\t%s\t%s" % (clusterInfo['jobid'],
clusterDir))
if self.__opCode > 0 and self.__opCode != 15:
self.__log.critical("Cannot allocate cluster %s" % clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires two arguments. " % operation
+ "A cluster directory and a nodecount.")
self.__opCode = 3
def _is_cluster_allocated(self, clusterDir):
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo != {}:
return True
return False
def _op_deallocate(self, args):
operation = "deallocate"
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
self.__opCode = \
self.__cluster.deallocate(clusterDir, clusterInfo)
# irrespective of whether deallocate failed or not\
# remove the cluster state.
self.__clusterState.clear()
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[3] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
else:
self.__handle_invalid_cluster_directory(clusterDir, cleanUp=True)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def _op_list(self, args):
operation = 'list'
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
for path in clusterList.keys():
if not os.path.isdir(path):
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
self.__setup_cluster_state(path)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__log.info("cluster state unknown\t%s\t%s" % (clusterList[path], path))
continue
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__log.info("alive\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 10:
self.__log.info("dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 13:
self.__log.info("hdfs dead\t%s\t%s" % (clusterList[path], path))
elif clusterStatus == 14:
self.__log.info("mapred dead\t%s\t%s" % (clusterList[path], path))
def _op_info(self, args):
operation = 'info'
argLength = len(args)
if argLength == 2:
clusterDir = self.__norm_cluster_dir(args[1])
if os.path.isdir(clusterDir):
self.__setup_cluster_state(clusterDir)
clusterInfo = self.__clusterState.read()
if clusterInfo == {}:
# something wrong with the cluster directory.
self.__handle_invalid_cluster_directory(clusterDir)
else:
clusterStatus = self.__cluster.check_cluster(clusterInfo)
if clusterStatus == 12:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
elif clusterStatus == 10:
self.__log.critical("%s cluster is dead" % clusterDir)
elif clusterStatus == 13:
self.__log.warn("%s cluster hdfs is dead" % clusterDir)
elif clusterStatus == 14:
self.__log.warn("%s cluster mapred is dead" % clusterDir)
if clusterStatus != 12:
if clusterStatus == 15:
self.__log.critical("Cluster %s not allocated." % clusterDir)
else:
self.__print_cluster_info(clusterInfo)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
self.__opCode = clusterStatus
else:
self.__handle_invalid_cluster_directory(clusterDir)
else:
print self.__hodhelp.help(operation)
self.__log.critical("%s operation requires one argument. " % operation
+ "A cluster path.")
self.__opCode = 3
def __handle_invalid_cluster_directory(self, clusterDir, cleanUp=False):
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return
clusterList = self.__userState.read(CLUSTER_DATA_FILE)
if clusterDir in clusterList.keys():
# previously allocated cluster.
self.__log.critical("Cannot find information for cluster with id '%s' in previously allocated cluster directory '%s'." % (clusterList[clusterDir], clusterDir))
if cleanUp:
self.__cluster.delete_job(clusterList[clusterDir])
self.__log.critical("Freeing resources allocated to the cluster.")
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.W_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[1] % \
self.__userState.get_state_file())
self.__opCode = 1
return
self.__remove_cluster(clusterDir)
self.__opCode = 3
else:
if not os.path.exists(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : No such directory")
elif not os.path.isdir(clusterDir):
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not a directory")
else:
self.__log.critical( \
"Invalid hod.clusterdir(--hod.clusterdir or -d). " + \
clusterDir + " : Not tied to any allocated cluster.")
self.__opCode = 15
def __print_cluster_info(self, clusterInfo):
keys = clusterInfo.keys()
_dict = {
'jobid' : 'Cluster Id', 'min' : 'Nodecount',
'hdfs' : 'HDFS UI at' , 'mapred' : 'Mapred UI at'
}
for key in _dict.keys():
if clusterInfo.has_key(key):
self.__log.info("%s %s" % (_dict[key], clusterInfo[key]))
if clusterInfo.has_key('ring'):
self.__log.debug("%s\t%s" % ('Ringmaster at ', clusterInfo['ring']))
if self.__cfg['hod']['debug'] == 4:
for var in clusterInfo['env'].keys():
self.__log.debug("%s = %s" % (var, clusterInfo['env'][var]))
def _op_help(self, arg):
    if arg is None or len(arg) != 2:
print "hod commands:\n"
for op in self.__ops:
print self.__hodhelp.help(op)
else:
if arg[1] not in self.__ops:
print self.__hodhelp.help('help')
self.__log.critical("Help requested for invalid operation : %s"%arg[1])
self.__opCode = 3
else: print self.__hodhelp.help(arg[1])
def operation(self):
operation = self.__cfg['hod']['operation']
try:
opList = self.__check_operation(operation)
if self.__opCode == 0:
if not self.__userState.checkStateFile(CLUSTER_DATA_FILE, (os.R_OK,)):
self.__log.critical(INVALID_STATE_FILE_MSGS[0] % \
self.__userState.get_state_file())
self.__opCode = 1
return self.__opCode
getattr(self, "_op_%s" % opList[0])(opList)
except HodInterruptException, h:
self.__log.critical("op: %s failed because of a process interrupt." \
% operation)
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("op: %s failed: %s" % (operation,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.__log.debug("return code: %s" % self.__opCode)
return self.__opCode
def script(self):
errorFlag = False
errorMsgs = []
scriptRet = 0 # return from the script, if run
script = self.__cfg['hod']['script']
nodes = self.__cfg['hod']['nodecount']
clusterDir = self.__cfg['hod']['clusterdir']
if not os.path.exists(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : No such file")
elif not os.path.isfile(script):
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not a file.")
else:
isExecutable = os.access(script, os.X_OK)
if not isExecutable:
errorFlag = True
errorMsgs.append("Invalid script file (--hod.script or -s) : " + \
script + " : Not an executable.")
if not os.path.exists(clusterDir):
try:
os.makedirs(clusterDir)
except OSError, err:
errorFlag = True
errorMsgs.append("Could not create cluster directory. %s" % (str(err)))
elif not os.path.isdir(clusterDir):
errorFlag = True
errorMsgs.append( \
"Invalid cluster directory (--hod.clusterdir or -d) : " + \
clusterDir + " : Not a directory")
if int(self.__cfg['hod']['nodecount']) < 3 :
errorFlag = True
errorMsgs.append("Invalid nodecount (--hod.nodecount or -n) : " + \
"Must be >= 3. Given nodes: %s" % nodes)
if errorFlag:
for msg in errorMsgs:
self.__log.critical(msg)
self.handle_script_exit_code(scriptRet, clusterDir)
sys.exit(3)
try:
self._op_allocate(('allocate', clusterDir, str(nodes)))
if self.__opCode == 0:
if self.__cfg['hod'].has_key('script-wait-time'):
time.sleep(self.__cfg['hod']['script-wait-time'])
          self.__log.debug('Slept for %d seconds. Now going to run the script' % self.__cfg['hod']['script-wait-time'])
if hodInterrupt.isSet():
self.__log.debug('Hod interrupted - not executing script')
else:
scriptRunner = hadoopScript(clusterDir,
self.__cfg['hod']['original-dir'])
self.__opCode = scriptRunner.run(script)
scriptRet = self.__opCode
self.__log.info("Exit code from running the script: %d" % self.__opCode)
else:
self.__log.critical("Error %d in allocating the cluster. Cannot run the script." % self.__opCode)
if hodInterrupt.isSet():
# Got interrupt while executing script. Unsetting it for deallocating
hodInterrupt.setFlag(False)
if self._is_cluster_allocated(clusterDir):
self._op_deallocate(('deallocate', clusterDir))
except HodInterruptException, h:
self.__log.critical("Script failed because of a process interrupt.")
self.__opCode = HOD_INTERRUPTED_CODE
except:
self.__log.critical("script: %s failed: %s" % (script,
get_exception_error_string()))
self.__log.debug(get_exception_string())
self.__cleanup()
self.handle_script_exit_code(scriptRet, clusterDir)
return self.__opCode
def handle_script_exit_code(self, scriptRet, clusterDir):
# We want to give importance to a failed script's exit code, and write out exit code to a file separately
# so users can easily get it if required. This way they can differentiate between the script's exit code
# and hod's exit code.
if os.path.exists(clusterDir):
exit_code_file_name = (os.path.join(clusterDir, 'script.exitcode'))
if scriptRet != 0:
exit_code_file = open(exit_code_file_name, 'w')
print >>exit_code_file, scriptRet
exit_code_file.close()
self.__opCode = scriptRet
else:
#ensure script exit code file is not there:
if (os.path.exists(exit_code_file_name)):
os.remove(exit_code_file_name)
class hodHelp:
def __init__(self):
self.ops = ['allocate', 'deallocate', 'info', 'list','script', 'help']
self.usage_strings = \
{
'allocate' : 'hod allocate -d <clusterdir> -n <nodecount> [OPTIONS]',
'deallocate' : 'hod deallocate -d <clusterdir> [OPTIONS]',
'list' : 'hod list [OPTIONS]',
'info' : 'hod info -d <clusterdir> [OPTIONS]',
'script' :
'hod script -d <clusterdir> -n <nodecount> -s <script> [OPTIONS]',
'help' : 'hod help <OPERATION>',
}
self.description_strings = \
{
'allocate' : "Allocates a cluster of n nodes using the specified \n" + \
" cluster directory to store cluster state \n" + \
" information. The Hadoop site XML is also stored \n" + \
" in this location.\n",
'deallocate' : "Deallocates a cluster using the specified \n" + \
" cluster directory. This operation is also \n" + \
" required to clean up a dead cluster.\n",
'list' : "List all clusters currently allocated by a user, \n" + \
" along with limited status information and the \n" + \
" cluster ID.\n",
'info' : "Provide detailed information on an allocated cluster.\n",
'script' : "Allocates a cluster of n nodes with the given \n" +\
" cluster directory, runs the specified script \n" + \
" using the allocated cluster, and then \n" + \
" deallocates the cluster.\n",
'help' : "Print help for the operation and exit.\n" + \
"Available operations : %s.\n" % self.ops,
}
def usage(self, op):
return "Usage : " + self.usage_strings[op] + "\n" + \
"For full description: hod help " + op + ".\n"
def help(self, op=None):
if op is None:
return "hod <operation> [ARGS] [OPTIONS]\n" + \
"Available operations : %s\n" % self.ops + \
"For help on a particular operation : hod help <operation>.\n" + \
"For all options : hod help options."
else:
return "Usage : " + self.usage_strings[op] + "\n" + \
"Description : " + self.description_strings[op] + \
"For all options : hod help options.\n"
|
ZhangXFeng/hadoop
|
src/hadoop-mapreduce1-project/src/contrib/hod/hodlib/Hod/hod.py
|
Python
|
apache-2.0
| 29,420
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hdf5VfdGds(CMakePackage, CudaPackage):
"""This package enables GPU Direct Storage Virtual File Driver in HDF5."""
# Package info
homepage = 'https://github.com/hpc-io/vfd-gds'
url = 'https://github.com/hpc-io/vfd-gds/archive/refs/tags/1.0.1.tar.gz'
git = 'https://github.com/hpc-io/vfd-gds.git'
maintainers = ['hyoklee', 'lrknox']
# Versions
version('master', branch='master')
version('1.0.1', sha256='00e125fd149561be991f41e883824de826d8add604aebccf103a4fb82d5faac2')
version('1.0.0', sha256='6b16105c7c49f13fc05784ee69b78d45fb159270c78d760689f9cd21e230ddd2')
# Dependencies
conflicts('~cuda')
depends_on('cmake@3.12:')
depends_on('hdf5@1.13.0:')
def cmake_args(self):
# CMake options
args = [
self.define('BUILD_TESTING', self.run_tests),
]
return args
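# Illustrative install command (the cuda_arch value is an assumption):
#   spack install hdf5-vfd-gds+cuda cuda_arch=80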
|
LLNL/spack
|
var/spack/repos/builtin/packages/hdf5-vfd-gds/package.py
|
Python
|
lgpl-2.1
| 1,102
|
'''
Created on Sep 28, 2013
@author: justin
'''
class databaseConstants():
'''
Holds the metadata behind the database creation
'''
_connDict = {
'DBType':'mysql',
'DBUsername':'root',
'DBPassword':'Ginger*12',
        'DBName':'SportsStatistics',
'DBServer':'localhost'
}
    #Connection Information
    DBType = _connDict['DBType']
    DBUsername = _connDict['DBUsername']
    DBPassword = _connDict['DBPassword']
    DBName = _connDict['DBName']
    DBServer = _connDict['DBServer']
#Database List
DatabaseList = [
'DBStats',
'DBStatDependancyCompilation'
]
#Table list, dictionary key must match a db above
TableList = {
'DBStats':[
'TestTable',
'TestTable1',
'TestTable2',
'TestTable3'
],
'DBStatDependancyCompilation':[
'TestTable',
'TestTable1',
'TestTable2',
'TestTable3'
]
}
def __init__(self):
'''
All Database Metadata functionality
'''
print 'Initializing database constants '
def ConnectionString(self):
'''
Gets the connection string for the database
'''
print 'retrieving connection string'
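        #with the defaults above this returns 'mysql://root:Ginger*12@localhost'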
return str(self.GetDatabaseType()) +'://' + str(self.DBUsername) + ':' + str(self.DBPassword) + '@' + str(self.DBServer)
def GetDatabaseType(self):
'''
Returns the current database driver
using SQLAlchemy
'''
print 'retrieving database type'
return self.DBType
def GetDatabaseName(self):
'''
Gets the database name
'''
print 'retrieving connection string'
return self.DBName
def GetDBPassword(self):
'''
Gets the database user's password
'''
print 'retrieving database password'
return self.DBPassword
def GetDatabaseList(self):
'''
Gets the list of all databases to create
'''
print 'retrieving list of all databases'
        return self.DatabaseList
def GetAllDatabaseTables(self):
'''
Iterates through the defined list of
DB's and then uses the TableList dict
to then iterate through the tables
'''
print 'retrieving all tables'
        for db in self.DatabaseList:
print str(db)
|
JJarczyk12/Sports-Statistical-Scraper
|
SportsStatistics/model/databaseConstants.py
|
Python
|
mit
| 2,575
|
#!/usr/bin/env python
"""
This file is a series of tasks to preprocess COBRE dataset
Installation
------------
It runs on Python > 3.3 or Python2.7 and uses invoke (or Fabric when a Python3 version is released) to execute
the tasks from the command line.
- requirements
pip install invoke
pip install git@github.com:Neurita/boyle.git
- optional requirement (for caching results):
pip install joblib
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import re
import shutil
import logging
import os.path as op
import numpy as np
from functools import partial
from subprocess import Popen, PIPE
from collections import OrderedDict
from boyle.mhd.write import copy_mhd_and_raw
from boyle.commands import which
from boyle.utils.strings import count_hits, where_is
from boyle.utils.text_files import read
from boyle.utils.rcfile import (rcfile, get_sections, get_sys_path, find_in_sections,
get_rcfile_section, get_rcfile_variable_value)
from boyle.files.search import recursive_find_match, check_file_exists
from boyle.files.names import get_extension, remove_ext
from boyle.nifti.cpac_helpers import xfm_atlas_to_functional
from boyle.nifti.roi import partition_timeseries
from boyle.storage import save_variables_to_hdf5
from invoke import task
from invoke import run as local
# setup log
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# read configurations
APPNAME = 'cobre'
try:
CFG = rcfile(APPNAME)
RAW_DIR = op.expanduser(CFG['raw_dir' ])
PREPROC_DIR = op.expanduser(CFG['preproc_dir'])
CACHE_DIR = op.expanduser(CFG['cache_dir' ])
EXPORTS_DIR = op.expanduser(CFG['exports_dir'])
ATLAS_DIR = op.expanduser(CFG['atlas_dir' ])
STD_DIR = op.expanduser(CFG['std_dir' ])
DATA_DIR = PREPROC_DIR
# read files_of_interest section
FOI_CFG = rcfile(APPNAME, 'files_of_interest')
except:
log.exception('Error reading config variable from settings in {} rcfiles.'.format(APPNAME))
raise
def verbose_switch(verbose=False):
if verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.getLogger().setLevel(log_level)
@task
def clean_cache(cache_dir=CACHE_DIR):
"""Remove joblib cache folder"""
cache_dir = op.expanduser(cache_dir)
log.info('Removing cache folder {}'.format(cache_dir))
shutil.rmtree(cache_dir)
@task(autoprint=True)
def get_rc_sections(app_name=APPNAME):
"""Return the available rcfiles sections"""
sections = get_sections(app_name)
return sections
@task
def show_configuration(app_name=APPNAME, section=None):
""" Show the rcfile configuration variables for the given application.
Parameters
----------
app_name: str
Name of the application to look for rcfiles.
section: str
Rcfile section name
"""
cfg = rcfile(app_name, section)
for i in cfg:
print("{} : {}".format(i, cfg[i]))
if section is not None:
return
sections = get_sections(app_name)
for s in sections:
if app_name not in s:
print('')
print('[{}]'.format(s))
cfg = rcfile(app_name, s)
for i in cfg:
print("{} : {}".format(i, cfg[i]))
@task(autoprint=True)
def get_subject_labels(app_name=APPNAME, subj_labels_file_varname='subj_labels'):
""" Return the class labels of all subjects in a list
Parameters
----------
app_name: str
Name of the application to look for rcfiles.
subj_labels_file_varname: str
Name of the rcfile variable that holds the path to the subject labels file.
Returns
-------
labels
list of int
"""
file_path = op.realpath(op.expanduser(CFG.get(subj_labels_file_varname, None)))
if file_path is None:
raise KeyError('Could not find variable {} in {} rcfile.'.format(subj_labels_file_varname, app_name))
return np.loadtxt(file_path, dtype=int, delimiter='\n')
def read_subject_ids_file(app_name=APPNAME, subj_id_list_varname='subj_id_list_file'):
""" Return the content of the subject_id file in a list.
Parameters
----------
app_name: str
Name of the application to look for rcfiles.
subj_id_list_varname: str
Name of the rcfile variable that holds the path to the subject ids file.
Returns
-------
subject_ids: list of str
"""
file_path = op.realpath(op.expanduser(CFG.get(subj_id_list_varname, None)))
if file_path is None:
raise KeyError('Could not find variable {} in {} rcfile.'.format(subj_id_list_varname, app_name))
log.debug('Reading list of subject ids from file {}.'.format(file_path))
return read(file_path).split('\n')
@task(autoprint=True)
def get_subject_ids(app_name=APPNAME, subj_id_list_varname='subj_id_list_file', remove_commented=False):
""" Return the class labels of all subjects in a list
Parameters
----------
app_name: str
Name of the application to look for rcfiles.
subj_id_list_varname: str
Name of the rcfile variable that holds the path to the subject ids file.
remove_commented: bool
If True will remove the ids that are commented with a '#'. Will return them all, otherwise.
Returns
-------
subject_ids: list of str
"""
subj_ids = read_subject_ids_file(app_name, subj_id_list_varname)
if remove_commented:
subj_ids = [id for id in subj_ids if not id.startswith('#')]
else:
subj_ids = [id.replace('#', '').strip() for id in subj_ids]
return subj_ids
@task(autoprint=True)
def get_filtered_subjects_ids_and_labels(app_name=APPNAME, subj_id_list_varname='subj_id_list_file',
subj_id_regex_varname='subj_id_regex'):
"""Will use the value of subj_id_regex variable to filter out the subject ids that do not match on the
subj_id_list_file of the rcfile. Will also return filtered labels.
The recommendation is to add a '#' character in front of the IDs that you want excluded from the experiment.
Parameters
----------
app_name: str
Name of the application to look for rcfiles.
subj_id_list_varname: str
Name of the rcfile variable that holds the path to the subject ids file.
subj_id_regex_varname: str
Regular expression
Returns
-------
filt_ids: list of str
The subject ids that match the subject_id regex variable from the rcfile.
"""
subj_ids = read_subject_ids_file(app_name, subj_id_list_varname)
labels = get_subject_labels()
matches_subj_regex = partial(re.match, CFG[subj_id_regex_varname])
log.debug('Filtering list of files using subjects ids from subject ids file.')
filt_ids = []
filt_labs = []
for idx, sid in enumerate(subj_ids):
if matches_subj_regex(sid) is not None:
filt_ids.append (sid)
filt_labs.append(labels[idx])
return filt_ids, filt_labs
@task(autoprint=True)
def get_subject_ids_and_labels(filter_by_subject_ids=False):
if filter_by_subject_ids:
subj_ids, labels = get_filtered_subjects_ids_and_labels()
else:
subj_ids = get_subject_ids()
labels = get_subject_labels()
return subj_ids, labels
def filter_list_by_subject_ids(files, subject_ids):
""" Look for matches of each subject_id in the files list, if a match is not found, the file is removed.
The filtered list is then returned.
Parameters
----------
files: list of str
List of file paths that contain a subject id
subject_ids: list of str
List of subject ids that you want included in files
Returns
-------
filtered_list: list of str
List of file paths that contain any of the subject ids in subject_ids
"""
if files is None or not files:
return files
if subject_ids is None or not subject_ids:
return files
log.debug('Filtering list of files using subjects ids.')
filt_files = []
for fn in files:
if any(re.search(sid, fn) for sid in subject_ids):
filt_files.append(fn)
return filt_files
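# Illustrative (paths and subject ids are made up):
#   filter_list_by_subject_ids(['/d/0040000_anat.nii', '/d/0040001_anat.nii'],
#                              ['0040001'])  ->  ['/d/0040001_anat.nii']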
def call_and_logit(cmd, logfile='logfile.log'):
"""Call cmd line with shell=True and saves the output and error output in logfile"""
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if not logfile:
return rc
try:
with open(logfile, 'a') as alog:
alog.write(output.decode("utf-8"))
alog.write( err.decode("utf-8"))
except:
log.exception('Error writing logfile {}.'.format(logfile))
finally:
return rc
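# Illustrative (the command and log name are assumptions; fslchfiletype is
# the same FSL tool used by compress_niftis below):
#   rc = call_and_logit('fslchfiletype NIFTI_GZ scan.nii', 'convert.log')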
@task
def compress_niftis(work_dir=DATA_DIR, verbose=False):
"""Compress nifti files within work_dir using fslchfiletype command."""
if not which('fslchfiletype'):
log.error('Cannot find fslchfiletype to compress NifTi files. Passing.')
return -1
verbose_switch(verbose)
niftis = recursive_find_match(work_dir, '.*nii$')
niftis.sort()
for nii in niftis:
log.debug('Compressing {}'.format(nii))
local('fslchfiletype NIFTI_GZ {}'.format(nii))
@task
def rename_files_of_interest(work_dir=DATA_DIR, verbose=False):
"""Look through the work_dir looking to the patterns matches indicated in the files_of_interest section of the
config file.
For each match it creates a copy of the file in the same folder renamed to the names of the section configuration
option.
This will keep the file extensions and adding '+' characters if there are more than one match.
"""
verbose_switch(verbose)
def copy_file(src, dst):
dirname = op.dirname (src)
ext = get_extension(src)
dst = op.basename (dst)
dst = op.join(dirname, remove_ext(dst))
# add many '+' to the files that have repeated names
#while op.exists(dst + ext):
# dst += '+'
#dst += ext
# add a counter value to the files that have repeated names
        if op.exists(dst + ext):
            fc = 2
            while op.exists(dst + str(fc) + ext):
                fc += 1
            dst += str(fc) + ext
        else:
            dst += ext
# copy the src file to the given dst value
try:
if ext == '.mhd':
return copy_mhd_and_raw(src, dst)
else:
shutil.copyfile(src, dst)
return dst
except:
log.exception('Error copying file {} to {}.'.format(src, dst))
raise
def has_mhd_with_raws(files):
"""Return True if the number of .raw files is the same as the number of .mhd files"""
return count_hits(files, '.*\.raw$') == count_hits(files, '.*\.mhd$')
for foi in FOI_CFG:
regex = FOI_CFG[foi]
files = recursive_find_match(work_dir, regex)
files.sort()
if not files:
log.error('Could not find {} files that match {} within {}.'.format(foi, regex, work_dir))
continue
use_copy_mhd_and_raw = has_mhd_with_raws(files)
        log.debug('Copying {} to {}.'.format(regex, foi))
for fn in files:
ext = get_extension(fn)
if use_copy_mhd_and_raw:
if ext == '.raw':
continue
new_fn = op.join(op.dirname(fn), foi) + ext
try:
new_dst = copy_file(fn, new_fn)
except:
msg = 'Error copying file {} to {}.'.format(fn, new_fn)
log.exception(msg)
raise IOError(msg)
if not op.exists(new_dst):
msg = 'Error copying file {} to {}. After trying to copy, the file does not exist.'.format(fn, new_dst)
log.error(msg)
@task
def remove_files_of_interest(work_dir=DATA_DIR, verbose=True):
"""Look through the work_dir looking to the patterns matches indicated in the files_of_interest section of
the config file and remove them.
"""
verbose_switch(verbose)
for foi in FOI_CFG:
regex = get_file_of_interest_regex(foi)
        log.info('Removing files within {} that match {}.'.format(work_dir, regex))
remove_files(regex, work_dir, verbose)
@task
def remove_files(pattern, work_dir=DATA_DIR, verbose=False):
"""Look through the work_dir looking to the patterns matches the pattern argument value and remove them.
"""
verbose_switch(verbose)
    import sys
    from distutils.util import strtobool
    # Python 3's input() behaves like Python 2's raw_input()
    read_input = input if sys.version_info[0] >= 3 else raw_input
def prompt(query):
sys.stdout.write('%s [y/n]: ' % query)
        val = read_input()
try:
ret = strtobool(val)
except ValueError:
sys.stdout.write('Please answer with a y/n\n')
return prompt(query)
return ret
files = find_files(work_dir, pattern)
if not files:
log.info('Could not find files that match r"{0}" within {1}.'.format(pattern, work_dir))
return
log.info('\n'.join(files))
if prompt('Found these files. Want to remove?'):
for fn in files:
log.debug('Removing {}.'.format(fn))
os.remove(fn)
@task(autoprint=True)
def find_files(work_dir, regex):
""" Returns a list of the files that match the regex value within work_dir.
Parameters
----------
work_dir: str
        Path of the root folder from where to start the search.
regex: str
Name of the variable in files_of_interest section.
"""
try:
check_file_exists(work_dir)
except:
return []
files = recursive_find_match(work_dir, regex)
files.sort()
return files
@task(autoprint=True)
def get_file_of_interest_regex(name, app_name=APPNAME):
"""Return the regex of the name variable in the files_of_interest section of the app rc file."""
return get_rcfile_variable_value(name, 'files_of_interest', app_name)
def print_list(alist):
    for i in alist:
        print(i)
@task
def show_regex_match(regex, work_dir=DATA_DIR, filter_by_subject_ids=False):
"""Lists the files inside work_dir that match the name of the given regex.
Parameters
----------
regex: str
Regular expression.
work_dir: str
Path of the root folder from where to start the search.
Or, if the given name is not an existing path, name of the rcfile variable that contains the folder path.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
try:
check_file_exists(work_dir)
except:
work_dir = op.expanduser(CFG[work_dir])
files = find_files(work_dir, regex)
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
if filter_by_subject_ids:
files = filter_list_by_subject_ids(files, subj_ids)
if not files:
log.info('No files that match "{}" found in {}.'.format(regex, work_dir))
else:
log.info('# Files that match "{}" in {}:'.format(regex, work_dir))
print_list(files)
@task
def show_files(name, work_dir=DATA_DIR, filter_by_subject_ids=False):
"""Show a list of the files inside work_dir that match the regex value of the variable 'name' within the
files_of_interest section.
Parameters
----------
name: str
Name of the variable in files_of_interest section.
work_dir: str
Path of the root folder from where to start the search.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
regex = get_file_of_interest_regex(name)
log.debug('Looking for files that match {} within {}.'.format(regex, work_dir))
files = find_files(work_dir, regex)
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
if filter_by_subject_ids:
files = filter_list_by_subject_ids(files, subj_ids)
if not files:
log.error('No files that match "{}" found in {}.'.format(regex, work_dir))
else:
log.debug('# Files that match "{}" in {}:'.format(regex, work_dir))
print_list(files)
return files
@task
def show_my_files(rcpath, app_name=APPNAME, filter_by_subject_ids=False):
"""Shows the files within the rcpath, i.e., a string with one '/', in the
format <variable of folder path>/<variable of files_of_interest regex>.
Parameters
----------
rcpath: str
A path with one '/', in the format <variable of folder path>/<variable of files_of_interest regex>.
For example: 'data_dir/anat' will look for the folder path in the data_dir variable and the regex in the
anat variable inside the files_of_interest section.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
if '/' not in rcpath:
log.error("Expected an rcpath in the format <variable of folder path>/<variable of files_of_interest regex>.")
return -1
dir_name, foi_name = rcpath.split('/')
app_cfg = rcfile(app_name)
if dir_name not in app_cfg:
log.error("Option {} not found in {} section.".format(dir_name, app_name))
return -1
work_dir = op.expanduser(app_cfg[dir_name])
return show_files(foi_name, work_dir, filter_by_subject_ids=filter_by_subject_ids)
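# Example CLI call (Fabric-style invocation; variable names hypothetical):
#   fab show_my_files:data_dir/anat
# resolves the folder from the 'data_dir' variable and the regex from the
# 'anat' variable in the files_of_interest section, then prints the matches.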
@task
def clean():
"""Remove a few temporal files and logs in this folder."""
local('rm *.log')
local('rm *.pyc')
shutil.rmtree('__pycache__')
@task(autoprint=True)
def get_standard_file(file_name_varname, app_name=APPNAME):
""" Return the path to an atlas or a standard template file.
Looks for the variable in the 'standard' and 'atlases' sections of the rcfiles.
Parameters
----------
file_name_varname: str
Name of the rcfile variable that holds the atlas or standard template file name.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
std_path: str
Path to the atlas or the standard template.
"""
section_name, var_value = find_in_sections(file_name_varname, app_name)
if section_name == 'atlases':
std_path = op.join(ATLAS_DIR, var_value)
elif section_name == 'standard':
std_path = op.join(STD_DIR, var_value)
else:
raise KeyError('The variable {} could only be found in section {}. '
'I do not know what to do with this.'.format(file_name_varname, section_name))
return std_path
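# Example (hypothetical rcfile values): if the 'atlases' section defines
#   aal_3mm = aal_3mm.nii.gz
# then get_standard_file('aal_3mm') returns op.join(ATLAS_DIR, 'aal_3mm.nii.gz').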
#
# @task
# def create_cpac_subj_list(anat_file_var='raw_anat', rest_files_vars=['raw_rest'],
# output='CPAC_subject_list_file.yaml',
# filter_by_subject_ids=False, verbose=False):
# """Create a C-PAC subject list file including the path to the files represented by the variables in
# conf_variables.
#
# Parameters
# ----------
# anat_file_var: str
# Variable name in the application rcfiles which hold the name of the subject anatomical file.
#
# rest_files_vars: list of str
# List of variable names in the application rcfiles which hold the name of the subject fMRI files.
#
# output: str
# Path of the output file
#
# filter_by_subject_ids: bool
# If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
# and let only matches to the subject ID list values.
#
# verbose: bool
# If True will show debug logs.
# """
# import yaml
#
#
@task
def run_cpac(cpac_pipeline_file_varname='cpac_pipeline_file', verbose=False):
"""Execute cpac_run.py using the configuration from the rcfile"""
try:
conf_dir = op.realpath(op.join(op.dirname(__file__), CFG['cpac_conf'] ))
subjects_list = op.realpath(op.join(conf_dir, CFG['cpac_subjects_list'] ))
pipeline_file = op.realpath(op.join(conf_dir, CFG[cpac_pipeline_file_varname]))
except KeyError as ke:
log.exception(ke)
raise
verbose_switch(verbose)
cpac_cmd = 'cpac_run.py'
cpac_path = which(cpac_cmd)
if cpac_path is None:
log.error('Could not find {} command.'.format(cpac_cmd))
return -1
if op.exists('cpac.log'):
log.debug('Remove cpac.log file.')
os.remove('cpac.log')
cmd = '"{}" "{}" "{}"'.format(cpac_path, pipeline_file, subjects_list)
log.debug('Calling: {}'.format(cmd))
log.info('Logging to cpac.log')
# print('import CPAC')
# print('CPAC.pipeline.cpac_runner.run("{}", "{}")'.format(pipeline_file, subjects_list))
call_and_logit(cmd, 'cpac.log')
# ----------------------------------------------------------------------------------------------------------------------
# COBRE PROJECT SPECIFIC FUNCTIONS
# ----------------------------------------------------------------------------------------------------------------------
OLD_COBRE_DIR = op.expanduser(CFG.get('old_cobre_dir', ''))
OLD_COBRE_CFG = rcfile(APPNAME, 'old_cobre')
SUBJ_ID_REGEX = CFG['subj_id_regex']
FSURF_DIR = op.expanduser(CFG['fsurf_dir'])
PREPROC_DIR = OLD_COBRE_DIR
@task
def recon_all(input_dir=RAW_DIR, out_dir=FSURF_DIR, use_cluster=True, verbose=False, filter_by_subject_ids=False):
"""Execute recon_all on all subjects from input_dir/raw_anat
Parameters
----------
input_dir: str
Path to where the subjects are
out_dir: str
Path to output folder where freesurfer will leave results.
use_cluster: bool
If True will use fsl_sub to submit the jobs to your set up cluster queue. This is True by default.
Use the flag -c to set it to False.
verbose: bool
If True will show debug logs.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
verbose_switch(verbose)
os.environ['SUBJECTS_DIR'] = out_dir
regex = get_file_of_interest_regex('raw_anat')
subj_anats = find_files(input_dir, regex)
subj_reg = re.compile(SUBJ_ID_REGEX)
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
if filter_by_subject_ids:
subj_anats = filter_list_by_subject_ids(subj_anats, subj_ids)
recon_all = which('recon-all')
for subj_anat_path in subj_anats:
subj_id = subj_reg.search(subj_anat_path).group()
#recon-all -all -i raw/0040000/session_1/anat_1/mprage.nii.gz -s 0040000
cmd = '{} -all -i {} -s {} -sd {}'.format(recon_all, subj_anat_path, subj_id, out_dir)
if use_cluster:
cmd = 'fsl_sub ' + cmd
log.debug('Calling {}'.format(cmd))
call_and_logit(cmd, 'freesurfer_{}.log'.format(subj_id))
@task(autoprint=True)
def get_pipeline_folder(root_dir=PREPROC_DIR, pipe_section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
app_name=APPNAME):
pipe_dirpath = get_rcfile_variable_value(pipe_varname, section_name=pipe_section_name, app_name=app_name)
root_dir = get_sys_path(root_dir, section_name=pipe_section_name, app_name=app_name)
return op.join(root_dir, pipe_dirpath)
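# Example (hypothetical values): with root_dir '/data/preproc' and the rcfile
# variable pipe_wtemp_wglob = 'pipeline/wtemp_wglob', this returns
# '/data/preproc/pipeline/wtemp_wglob'.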
@task(autoprint=True)
def get_pipeline_files(root_dir=PREPROC_DIR, pipe_section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
file_name_varname='reho', filter_by_subject_ids=False, app_name=APPNAME):
"""See show_pipeline_files."""
pipe_dir = get_pipeline_folder(root_dir=root_dir, pipe_varname=pipe_varname,
pipe_section_name=pipe_section_name, app_name=app_name)
section_name, var_value = find_in_sections(file_name_varname, app_name)
if section_name == 'files_of_interest':
varname = var_value
log.debug('Looking for {} files from pipe {} within {} folder'.format(varname, pipe_varname, pipe_dir))
files = find_files(pipe_dir, varname)
elif section_name == 'relative_paths':
varname = get_rcfile_variable_value('funcfiltx', section_name='files_of_interest', app_name=app_name)
relpath = var_value
log.debug('Looking for {} files from pipe {} within {} folder'.format(varname, pipe_varname, pipe_dir))
files = [op.join(pipe_dir, subj_f, relpath) for subj_f in os.listdir(pipe_dir)]
else:
raise KeyError('The variable {} could only be found in section {}. '
'I do not know what to do with this.'.format(file_name_varname, section_name))
if filter_by_subject_ids:
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
files = filter_list_by_subject_ids(files, subj_ids)
log.debug('Found {} files that match the file name in pipeline folder {}.'.format(len(files), pipe_dir))
return files
@task
def show_pipeline_files(root_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
file_name_varname='reho', verbose=False, filter_by_subject_ids=False):
"""Return a list of the file_name_varname files in the corresponding pipeline.
Parameters
----------
root_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
file_name_varname: str
RCfile variable name for the file you are looking for.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
verbose_switch(verbose)
pipe_files = get_pipeline_files(root_dir, section_name, pipe_varname, file_name_varname,
filter_by_subject_ids=filter_by_subject_ids)
if not pipe_files:
log.info('Could not find {} files from pipe {} within {} folder'.format(file_name_varname, pipe_varname, root_dir))
else:
print_list(pipe_files)
@task
def pack_pipeline_files(root_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_wglob',
file_name_varname='reho', mask_file_varname='brain_mask_dil_3mm', smooth_fwhm=0,
output_file='cobre_reho_pack.mat', verbose=False, filter_by_subject_ids=False):
"""Mask and compress the data into a file.
Will save into the file: data, mask_indices, vol_shape
data: Numpy array with shape N x prod(vol.shape)
containing the N files as flat vectors.
mask_indices: matrix with indices of the voxels in the mask
vol_shape: Tuple with shape of the volumes, for reshaping.
Parameters
----------
root_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
file_name_varname: str
RCfile variable name for the file you are looking for.
mask_file_varname: str
RCfile variable name for the mask file that you want to use to mask the data.
smooth_fwhm: int
FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
output_file: str
Path to the output file. The extension of the file will be taken into account for the file format.
Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
'.mat' (Matlab archive),
'.hdf5' or '.h5' (HDF5 file)
verbose: bool
If verbose will show DEBUG log info.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
verbose_switch(verbose)
mask_file = None
if mask_file_varname:
mask_file = op.join(op.expanduser(CFG['std_dir']), FOI_CFG[mask_file_varname])
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
pipe_files = get_pipeline_files(root_dir, section_name, pipe_varname, file_name_varname,
filter_by_subject_ids=filter_by_subject_ids)
pipe_files.sort()
if not pipe_files:
log.error('Could not find {} files from pipe {} '
'within {} folder'.format(file_name_varname, pipe_varname, root_dir))
exit(-1)
log.debug('Parsing {} subjects into a Nifti file set.'.format(len(pipe_files)))
try:
_pack_files_to(pipe_files, mask_file=mask_file, labels=labels, subj_ids=subj_ids, smooth_fwhm=smooth_fwhm,
output_file=output_file, verbose=verbose)
except:
log.exception('Error creating the set of subjects from {} files '
'from pipe {} within {} folder'.format(file_name_varname, pipe_varname, root_dir))
raise
def _pack_files_to(images, output_file, mask_file=None, labels=None, subj_ids=None, smooth_fwhm=0, verbose=False):
"""Get NeuroImage files mask them, put all the data in a matrix and save them into
output_file together with mask shape and affine information and labels.
Will save into the file: data, mask_indices, vol_shape, labels
data: Numpy array with shape N x prod(vol.shape)
containing the N files as flat vectors.
mask_indices: matrix with indices of the voxels in the mask
vol_shape: Tuple with shape of the volumes, for reshaping.
Parameters
----------
images: list of str or img-like object.
See boyle.nifti.NeuroImage constructor docstring.
mask_file: str or img-like object.
See boyle.nifti.NeuroImage constructor docstring.
labels: list or tuple of str or int or float.
This list should have the same length as images.
If None, will use the info in the rcfile config files.
subj_ids: list or tuple of str
This list should have the same length as images.
If None, will use the info in the rcfile config files.
smooth_fwhm: int
FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
output_file: str
Path to the output file. The extension of the file will be taken into account for the file format.
Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
'.mat' (Matlab archive),
'.hdf5' or '.h5' (HDF5 file)
verbose: bool
If verbose will show DEBUG log info.
"""
from boyle.nifti.sets import NeuroImageSet
verbose_switch(verbose)
subj_set = NeuroImageSet(images, mask=mask_file, labels=labels, all_compatible=True)
subj_set.others['subj_ids'] = np.array(subj_ids)
log.debug('Saving masked data into file {}.'.format(output_file))
subj_set.to_file(output_file, smooth_fwhm=smooth_fwhm)
@task
def pack_files(name, output_file, work_dir=DATA_DIR, mask_file=None, labels=None, subj_ids=None, smooth_fwhm=0,
verbose=False, filter_by_subject_ids=False):
"""Pack a list of the files inside work_dir that match the regex value of the variable 'name' within the
files_of_interest section.
Parameters
----------
name: str
Name of the variable in files_of_interest section.
work_dir: str
Path of the root folder from where to start the search.
mask_file: str
RCfile variable name for the mask file that you want to use to mask the data.
labels: list or tuple of str or int or float.
This list should have the same length as images.
If None, will use the info in the rcfile config files.
subj_ids: list or tuple of str
This list should have the same length as images.
If None, will use the info in the rcfile config files.
smooth_fwhm: int
FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
output_file: str
Path to the output file. The extension of the file will be taken into account for the file format.
Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
'.mat' (Matlab archive),
'.hdf5' or '.h5' (HDF5 file)
verbose: bool
If verbose will show DEBUG log info.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
verbose_switch(verbose)
if mask_file is not None:
mask_file = op.join(op.expanduser(CFG['std_dir']), CFG[mask_file])
check_file_exists(mask_file)
images = show_files(name, work_dir=work_dir, filter_by_subject_ids=filter_by_subject_ids)
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
if images:
_pack_files_to(images, output_file, mask_file=mask_file, labels=labels, subj_ids=subj_ids,
smooth_fwhm=smooth_fwhm, verbose=verbose)
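# Example CLI call (Fabric-style invocation; variable names hypothetical):
#   fab pack_files:reho,reho_pack.mat,mask_file=brain_mask_dil_3mm
# packs all files matching the 'reho' regex into a Matlab archive, masked by
# the given standard-space mask.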
@task
def pack_my_files(rcpath, output_file, app_name=APPNAME, mask_file=None, labels=None, smooth_fwhm=0,
verbose=False, filter_by_subject_ids=False):
"""Pack a list of the files inside within the rcpath, i.e., a string with one '/', in the
format <variable of folder path>/<variable of files_of_interest regex>.
Parameters
----------
rcpath: str
A path with one '/', in the format <variable of folder path>/<variable of files_of_interest regex>.
For example: 'data_dir/anat' will look for the folder path in the data_dir variable and the regex in the
anat variable inside the files_of_interest section.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
mask_file: str
RCfile variable name for the mask file that you want to use to mask the data.
labels: list or tuple of str or int or float.
This list should have the same length as images.
smooth_fwhm: int
FWHM size in mm of a Gaussian smoothing kernel to smooth the images before storage.
output_file: str
Path to the output file. The extension of the file will be taken into account for the file format.
Choices of extensions: '.pyshelf' or '.shelf' (Python shelve)
'.mat' (Matlab archive),
'.hdf5' or '.h5' (HDF5 file)
verbose: bool
If verbose will show DEBUG log info.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
"""
verbose_switch(verbose)
if mask_file is not None:
mask_file = get_standard_file(mask_file)
check_file_exists(mask_file)
images = show_my_files(rcpath, app_name=app_name, filter_by_subject_ids=filter_by_subject_ids)
subj_ids, labels = get_subject_ids_and_labels(filter_by_subject_ids)
if images:
_pack_files_to(images, output_file, mask_file=mask_file, labels=labels, subj_ids=subj_ids,
smooth_fwhm=smooth_fwhm, verbose=verbose)
def get_cobre_export_data(root_dir=EXPORTS_DIR, section_name='old_cobre', type='timeseries', regex='',
app_name=APPNAME):
"""
Parameters
----------
root_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
type: str
Type of the data within the exported file archive. Choices:
'timeseries' - for smoothed or not raw fMRI timeseries data
'scalar_activity' - for local activity measures from fMRI timeseries data, e.g., reho, alff, etc.
regex: str
Regular expression to match with the archive file name.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
list
List of export files found
"""
type_choices = {'timeseries', 'scalar_activity'}
try:
settings = get_rcfile_section(app_name, section_name)
feats_dir_name = settings['features_dir']
ts_feats_dir_name = settings['timeseries_feats_dir']
scalar_feats_dir_name = settings['scalar_wtemp_noglob_feats_dir']
except IOError:
raise
except Exception:
msg = 'Error looking for variable names in {} rc file in section {}.'.format(app_name, section_name)
log.exception(msg)
raise KeyError(msg)
if type == 'timeseries':
work_dir = op.join(root_dir, feats_dir_name, ts_feats_dir_name)
elif type == 'scalar_activity':
work_dir = op.join(root_dir, feats_dir_name, scalar_feats_dir_name)
else:
msg = 'Expected type variable value of {} but got {}.'.format(type_choices, type)
log.error(msg)
raise ValueError(msg)
files = find_files(work_dir, regex)
if len(files):
log.debug('Found the following export data files: {}.'.format(files))
else:
log.debug('Did not find any export data files within {} with the regex {}.'.format(work_dir, regex))
return files
def get_cobre_export_timeseries(root_dir=EXPORTS_DIR, section_name='old_cobre', fwhm='4mm'):
"""
See get_cobre_export_data.
Parameters
----------
fwhm: str
Part of the file name with information of FWHM smoothing kernel size, e.g.: '0mm' or '4mm'
Returns
-------
List of files found
"""
regex = '.*' + fwhm + '.*'
return get_cobre_export_data(root_dir, section_name=section_name, type='timeseries', regex=regex)
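# Example: get_cobre_export_timeseries(fwhm='4mm') builds the regex '.*4mm.*'
# and would match archive names such as 'cobre_timeseries_4mm.hdf5'
# (file name hypothetical).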
def get_cobre_export_scalar_data(root_dir=EXPORTS_DIR, section_name='old_cobre', type='reho', pipeline='wtemp_noglob'):
"""
See get_cobre_export_data.
Parameters
----------
type: str
Type of scalar fMRI-based activity measure, e.g., 'reho', 'alff', 'falff', 'vmhc'
pipeline: str
Pipeline configuration name used to match the archive file name, e.g., 'wtemp_noglob'.
Returns
-------
List of files found
"""
regex = '.*' + type + '.*' + pipeline + '.*'
return get_cobre_export_data(root_dir, section_name=section_name, type='scalar_activity', regex=regex)
def has_the_correct_subject_order(alist, filter_by_subject_ids=False):
"""Using the subject id list from get_subject_ids_and_labels will match alist for the same length and order.
Parameters
----------
alist: list of str or list of lists of str
If a list of strings, will re.search each string item for the corresponding subject id.
If a list of lists of strings, will look within each sub-list for a match of the corresponding subject id.
Returns
-------
has_the_correct_order: bool
Will return False with any error, length mismatch or element without subject id match.
True otherwise.
"""
ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)
if len(ids) < 1:
msg = 'The list of subjects ids is empty. Expected something else.'
log.error(msg)
return False
if len(alist) < 1:
msg = 'The given list to be checked is empty. Expected something else.'
log.error(msg)
return False
if len(ids) != len(alist):
msg = 'The length of the given list and the list of subject ids are different. Expected the same length, ' \
'got {} and {}. The first element of the given list is {}.'.format(len(alist), len(ids), alist[0])
log.error(msg)
return False
for items in zip(ids, alist):
if isinstance(items[1], str):
if re.search(items[0], items[1]) is None:
return False
elif isinstance(items[1], list):
if where_is(items[1], items[0], lookup_func=re.search) < 0:
return False
else:
log.error('The given list element type is {}. Expected str or list of str. '
'The first element of the given list is {}.'.format(type(items[1]), alist[0]))
return False
return True
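# Example (hypothetical ids and paths): for ids ['0040000', '0040001'],
#   has_the_correct_subject_order(['/d/0040000/f.nii', '/d/0040001/f.nii'])
# returns True, while a swapped, missing or extra entry returns False.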
@task(autoprint=True)
def get_subject_folders(work_dir=PREPROC_DIR, section_name='old_cobre', pipe_varname='pipe_wtemp_noglob',
app_name=APPNAME, verbose=False, filter_by_subject_ids=False, check_order=True):
"""Return the first folder within the pipeline folder that is found with the name subj_id.
Parameters
----------
subj_id: str
ID number of the subject.
work_dir: str (optional)
Root folder path
section_name: str (optional)
Name of the section in the rcfiles to look for the pipe_varname argument value.
pipe_varname: str (optional)
Name of the variable in the rcfiles which hold the path to the desired pipeline to look for the subject folder.
verbose: bool (optional)
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_dir: str
Path to the subject folder.
"""
verbose_switch(verbose)
# get the pipeline folder path
pipe_dirpath = get_pipeline_folder(root_dir=work_dir, pipe_section_name=section_name, pipe_varname=pipe_varname,
app_name=app_name)
folders = [op.join(pipe_dirpath, subj_f) for subj_f in os.listdir(pipe_dirpath)]
subj_ids, _ = get_subject_ids_and_labels(filter_by_subject_ids)
subj_folders = []
for idx, sid in enumerate(subj_ids):
fidx = where_is(folders, sid, lookup_func=re.search)
if fidx >= 0:
subj_folders.append(folders[fidx])
# check that func and ids have the same length and match
if check_order:
if not has_the_correct_subject_order(subj_folders, filter_by_subject_ids=filter_by_subject_ids):
raise IOError('The list of subject folders found in {} and the list of subject '
'ids do not match.'.format(pipe_dirpath))
return subj_folders
@task(autoprint=True)
def get_subject_folder(subj_id, work_dir=PREPROC_DIR, section_name='old_cobre',
pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False):
"""Return the first folder within the pipeline folder that is found with the name subj_id.
Parameters
----------
subj_id: str
ID number of the subject.
work_dir: str (optional)
Root folder path
section_name: str (optional)
Name of the section in the rcfiles to look for the pipe_varname argument value.
pipe_varname: str (optional)
Name of the variable in the rcfiles which hold the path to the desired pipeline to look for the subject folder.
verbose: bool (optional)
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_dir: str
Path to the subject folder.
"""
verbose_switch(verbose)
subj_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
app_name=app_name, verbose=verbose, filter_by_subject_ids=False)
return get_subject_folder_from_list(subj_id, file_list=subj_folders, verbose=verbose)
@task(autoprint=True)
def get_subject_file(file_varname, subj_dir, check_exists=True, app_name=APPNAME):
""" Return the filepath for the rcfile file_varname for the given subject folder.
Parameters
----------
file_varname: str
subj_dir: str
check_exists: bool
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
filepath: str
"""
section_name, var_value = find_in_sections(file_varname, app_name)
if section_name == 'files_of_interest':
filepath = find_files(subj_dir, var_value)
if isinstance(filepath, list):
if not filepath:
raise IOError('Could not find any file {} within {}.'.format(var_value, subj_dir))
elif len(filepath) == 1:
filepath = filepath[0]
else:
raise IOError('Found more than one file {} within {}.'.format(var_value, subj_dir))
elif section_name == 'relative_paths':
filepath = op.join(subj_dir, var_value)
else:
raise KeyError('The variable {} could only be found in section {}. '
'I do not know what to do with this.'.format(file_varname, section_name))
if check_exists:
if not op.exists(filepath):
raise IOError('File {} not found.'.format(filepath))
return filepath
@task(autoprint=True)
def get_subject_folder_from_list(subj_id, file_list=None, verbose=False):
"""Return the first folder within the pipeline folder that is found with the name subj_id.
Parameters
----------
subj_id: str
ID number of the subject.
file_list: list of str (optional)
List of file paths which will be looked through to find the subject folder. All other variables will be ignored.
verbose: bool (optional)
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_dir: str
Path to the subject folder.
"""
verbose_switch(verbose)
# check that func and ids have the same length and match
if not has_the_correct_subject_order([f.split(op.sep) for f in file_list]):
msg = 'The list of functional files found and the list of subject ids do not match.'
raise RuntimeError(msg)
ids, _ = get_subject_ids_and_labels()
idx = where_is(ids, subj_id)
functional = file_list[idx]
# find the subject root dir
subjid_idx = where_is(functional.split(op.sep), subj_id, lookup_func=re.search)
subj_dir = os.sep.join(functional.split(op.sep)[0:subjid_idx + 1])
return subj_dir
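# Example (hypothetical path): for subj_id '0040000' and a file list entry
# '/data/pipe/0040000/rest.nii.gz', the returned subj_dir is
# '/data/pipe/0040000'.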
@task
def slicesdir(underlying, outline=None, work_dir=PREPROC_DIR, section_name='old_cobre',
pipe_varname='pipe_wtemp_noglob', verbose=False, filter_by_subject_ids=False, axials=False):
""" Call slicesdir using the relative file paths in the files_of_interest section.
Parameters
----------
underlying: str
A files_of_interest relative file path variable, that will be used to look for the volume files that will
be used as background in the slices images.
outline: str
If it is a path to a file, this will be used as the red-outline image on top of all images in underlying.
If a files_of_interest relative file path variable, will match this list with the underlying subjects list
and use each of them as red-outline image on top of the corresponding underlying image.
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
axials: bool
If True will output every second axial slice rather than just 9 ortho slices.
verbose: bool
If verbose will show DEBUG log info.
"""
verbose_switch(verbose)
# slicesdir
slicesdir = op.join(os.environ['FSLDIR'], 'bin', 'slicesdir')
# get the list of functional files for the given pipeline
funcs = get_pipeline_files(root_dir=work_dir, section_name=section_name,
pipe_varname=pipe_varname, filter_by_subject_ids=filter_by_subject_ids,
file_name_varname='funcfiltx')
# check that func and ids have the same length and match
if not has_the_correct_subject_order([f.split(op.sep) for f in funcs]):
msg = 'The list of functional files found and the list of subject ids do not match.'
raise RuntimeError(msg)
ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)
outline_filepath = ''
outline_is_one = False
if outline is not None:
if op.exists(outline):
outline_filepath = outline
outline_is_one = True
# get relative filepaths
underlying_filepath = get_file_of_interest_regex(underlying)
log.debug('Using as background image: {}'.format(underlying_filepath))
if outline is not None and not outline_filepath:
outline_filepath = get_file_of_interest_regex(outline)
log.debug('Using as red outline image: {}'.format(outline_filepath))
underlyings = []
outlines = []
for idx, subj_id in enumerate(ids):
subj_dir = get_subject_folder_from_list(subj_id, file_list=funcs, verbose=verbose)
underlying = op.join(subj_dir, underlying_filepath)
if not op.exists(underlying):
raise IOError('Could not find file {}.'.format(underlying))
underlyings.append(underlying)
if not outline_is_one and outline_filepath:
subj_outline = op.join(subj_dir, outline_filepath)
if not op.exists(subj_outline):
raise IOError('Could not find file {}.'.format(subj_outline))
outlines.append(subj_outline)
args = ' '
if axials:
args += '-S '
if outlines:
args += '-o '
args += ' '.join(['{} {}'.format(i, j) for i, j in zip(underlyings, outlines)])
elif outline_is_one:
args += '-p {} '.format(outline_filepath)
args += ' '.join(underlyings)
else:
args += ' '.join(underlyings)
cmd = slicesdir + args
log.debug('Running: {}'.format(cmd))
local(cmd)
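# Resulting command shapes (paths hypothetical):
#   slicesdir -o u1.nii.gz o1.nii.gz u2.nii.gz o2.nii.gz   # per-subject outlines
#   slicesdir -p outline.nii.gz u1.nii.gz u2.nii.gz        # one shared outline
#   slicesdir u1.nii.gz u2.nii.gz                          # no outline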
@task(autoprint=True)
def register_atlas_to_functionals(work_dir=PREPROC_DIR, atlas='aal_3mm', anat_out_var='aal_3mm_anat',
func_out_var='aal_3mm_func', section_name='old_cobre',
pipe_varname='pipe_wtemp_noglob', verbose=False, filter_by_subject_ids=False,
parallel=False, app_name=APPNAME):
"""Apply the existent transformation from MNI standard to functional MRI to an atlas image in MNI space.
Parameters
----------
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of interest variable name or file path to a 3D atlas volume.
anat_out_var: str
Variable name that holds the file name of the resulting registered atlas in a specific subject anatomical
space.
func_out_var: str
Variable name that holds the file name of the resulting registered atlas in a specific subject functional
space.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
parallel: bool
If True will launch the commands using ${FSLDIR}/fsl_sub to use the cluster infrastructure you have setup
with FSL (SGE or HTCondor).
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
"""
verbose_switch(verbose)
try:
atlas_filepath = get_standard_file(atlas)
except:
atlas_filepath = atlas
if not op.exists(atlas_filepath):
raise IOError('Could not find atlas file {}.'.format(atlas_filepath))
#read relative filepaths
subj_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
app_name=app_name, verbose=verbose, filter_by_subject_ids=filter_by_subject_ids,
check_order=True)
for subj_path in subj_folders:
find_subject_file_and_check = partial(get_subject_file, subj_dir=subj_path, check_exists=True)
anat_brain = find_subject_file_and_check('anat_brain' )
avg_func = find_subject_file_and_check('mean_func' )
atlas2anat_lin = find_subject_file_and_check('anat_to_mni_mat' )
atlas2anat_nlin = find_subject_file_and_check('anat_to_mni_nl' )
anat2func_lin = find_subject_file_and_check('anat_to_func_mat')
atlas_in_anat = get_subject_file(anat_out_var, subj_dir=subj_path, check_exists=False)
atlas_in_func = get_subject_file(func_out_var, subj_dir=subj_path, check_exists=False)
log.debug('Registering atlas to functional: {}.\n'.format(', '.join([anat_brain, avg_func, atlas2anat_lin,
atlas2anat_nlin, anat2func_lin,
atlas_in_anat,
atlas_in_func])))
xfm_atlas_to_functional(atlas_filepath, anat_brain, avg_func, atlas2anat_lin, atlas2anat_nlin, False,
anat2func_lin, atlas_in_anat, atlas_in_func, interp='nn', verbose=verbose,
rewrite=False, parallel=parallel)
@task(autoprint=True)
def get_atlaspartition_hdf5path(subj_id, pipe_varname='pipe_wtemp_noglob', atlas='aal_3mm_func'):
""" Return the hdf5 path for the atlas partition for the subject timeseries in the pipeline.
Parameters
----------
pipe_varname:
Pipeline variable name.
atlas:
Atlas variable name
subj_id: str
Subject ID
Returns
-------
hdf5path: str
"""
return '/{}_{}_timeseries/{}'.format(pipe_varname, atlas, subj_id)
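# E.g., with the default arguments, get_atlaspartition_hdf5path('0040000')
# returns '/pipe_wtemp_noglob_aal_3mm_func_timeseries/0040000'.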
@task(autoprint=True)
def get_atlaspartition_hdf5_filepath(atlas='aal_3mm_func', app_name=APPNAME):
""" Return the path of the HDF5 file which contains the atlas partition timeseries.
atlas: str
Atlas variable name
app_name: str
Returns
-------
hdf5_filepath: str
"""
if atlas == 'aal_3mm_func':
return op.join(EXPORTS_DIR, get_rcfile_variable_value('out_aal_timeseries', app_name=app_name))
else:
raise ValueError('Expected the name of a valid atlas variable name such as `aal_3mm_func`, '
'but got {}.'.format(atlas))
@task(autoprint=True)
def get_connectivity_hdf5_filepath(atlas='aal_3mm_func', app_name=APPNAME):
""" Return the path of the HDF5 file which contains the connectivity matrices.
atlas: str
Atlas variable name
app_name: str
Returns
-------
hdf5_filepath: str
"""
if atlas == 'aal_3mm_func':
return op.join(EXPORTS_DIR, get_rcfile_variable_value('out_aal_connectivities', app_name=app_name))
else:
raise ValueError('Expected the name of a valid atlas variable name such as `aal_3mm_func`, '
'but got {}.'.format(atlas))
@task
def save_atlas_timeseries_packs(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
filter_by_subject_ids=False):
""" Save the atlas partitioned timeseries into an HDF5 file.
Parameters
----------
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of interest variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
"""
verbose_switch(verbose)
subj_timeseries = atlas_partition_timeseries(work_dir=work_dir, atlas=atlas, section_name=section_name,
pipe_varname=pipe_varname, app_name=app_name, verbose=verbose,
filter_by_subject_ids=filter_by_subject_ids)
timeseries_filepath = get_atlaspartition_hdf5_filepath(atlas, app_name=app_name)
for subj_id in subj_timeseries:
# save_ts_pack into HDF file.
h5path = get_atlaspartition_hdf5path(subj_id, pipe_varname=pipe_varname, atlas=atlas)
log.debug('Saving {} {} partitioned functional timeseries in '
'{} group {}.'.format(subj_id, atlas, timeseries_filepath, h5path))
save_variables_to_hdf5(timeseries_filepath, {'{}_timeseries'.format(atlas): subj_timeseries[subj_id]}, mode='a',
h5path=h5path)
def atlas_partition_timeseries(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
filter_by_subject_ids=False):
""" Return a dictionary with each subject's timeseries partitioned by the atlas file.
Parameters
----------
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of interest variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_timeseries: dict
"""
verbose_switch(verbose)
#read relative filepaths
subj_folders = get_subject_folders(work_dir=work_dir, section_name=section_name, pipe_varname=pipe_varname,
app_name=app_name, verbose=verbose, filter_by_subject_ids=filter_by_subject_ids,
check_order=True)
ids, _ = get_subject_ids_and_labels(filter_by_subject_ids=filter_by_subject_ids)
subj_timeseries = OrderedDict()
for idx, subj_path in enumerate(subj_folders):
subj_id = ids[idx]
find_subject_file_and_check = partial(get_subject_file, subj_dir=subj_path, check_exists=True)
funcbrainmask = find_subject_file_and_check('funcbrainmask' )
functional = find_subject_file_and_check('func_freq_filtered')
atlas_in_func = find_subject_file_and_check(atlas )
log.debug('Partitioning subject {} timeseries in {} using atlas {}.'.format(subj_id, functional, atlas_in_func))
subj_atlas_ts = partition_timeseries(functional, atlas_in_func, funcbrainmask, zeroe=True, roi_values=None,
outdict=True)
subj_timeseries[subj_id] = subj_atlas_ts
return subj_timeseries
#@task
#def save_connectivity_matrices(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
# pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
# filter_by_subject_ids=False):
""" Save the connectivity matrices of with each subject's timeseries partitioned by the atlas file into an HDF5
file.
The file will be saved in exports
Parameters
----------
work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of intereste variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
"""
# connectitity_filepath = get_connectivity_hdf5_filepath(atlas, app_name=APPNAME)
#def create_connectivity_matrices(work_dir=PREPROC_DIR, atlas='aal_3mm_func', section_name='old_cobre',
# pipe_varname='pipe_wtemp_noglob', app_name=APPNAME, verbose=False,
# filter_by_subject_ids=False):
""" Return a dictionary with each subject's timeseries partitioned by the atlas file.
Parameters
----------
# work_dir: str
A real file path or a RCfile variable name which indicates where to start looking for files.
Note: be sure that if you want it a variable name, don't have a folder with the same name near this script.
atlas: str
Files of intereste variable name or file path to a 3D atlas volume.
section_name: str
RCfile section name to get the pipe_varname and also look for root_dir if needed.
pipe_varname: str
RCfile variable name for the pipeline pattern to match and filter the full paths of the found files.
verbose: bool
If verbose will show DEBUG log messages.
filter_by_subject_ids: bool
If True will read the file defined by subj_id_list_file variable in the rcfile and filter the resulting list
and let only matches to the subject ID list values.
verbose: bool
If verbose will show DEBUG log info.
app_name: str
Name of the app to look for the correspondent rcfile. Default: APPNAME (global variable)
Returns
-------
subj_timeseries: dict
"""
# h5path = get_atlaspartition_hdf5path(subj_id, pipe_varname=pipe_varname, atlas=atlas)
# timeseries_filepath = get_atlaspartition_hdf5_filepath(atlas, app_name=app_name)
# connectitity_filepath = get_connectivity_hdf5_filepath(atlas, app_name=APPNAME)
#load_variables_from_hdf5
#ts = h5py.File('/Users/alexandre/Dropbox (Neurita)/projects/cobre/cobre_partitioned_timeseries.hdf5')