repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
ioana-delaney/spark
|
dev/pip-sanity-check.py
|
80
|
1384
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.ml.param import Params
from pyspark.mllib.linalg import *
import sys
if __name__ == "__main__":
    # Build (or reuse) a local SparkSession named for this sanity check.
    spark = SparkSession\
        .builder\
        .appName("PipSanityCheck")\
        .getOrCreate()
    sc = spark.sparkContext

    # Distribute the integers 0..99 over 10 partitions and sum them;
    # sum(range(100)) == 4950, so any other result means the pip-installed
    # PySpark cannot run a simple distributed job correctly.
    rdd = sc.parallelize(range(100), 10)
    value = rdd.reduce(lambda x, y: x + y)
    if (value != 4950):
        print("Value {0} did not match expected value.".format(value), file=sys.stderr)
        sys.exit(-1)

    print("Successfully ran pip sanity check")

    spark.stop()
|
apache-2.0
|
ScreamingUdder/mantid
|
scripts/HFIRPowderReduction/HfirPDReductionGUI.py
|
1
|
91448
|
# pylint: disable=invalid-name, relative-import, too-many-lines,too-many-instance-attributes,too-many-arguments,C901
################################################################################
# Main class for HFIR powder reduction GUI
# Key word for future developing: FUTURE, NEXT, REFACTOR, RELEASE 2.0
################################################################################
from __future__ import (absolute_import, division, print_function)
from six.moves import range
import numpy
import os
try:
import urllib.request as urllib
except ImportError:
import urllib
from .ui_MainWindow import Ui_MainWindow # import line for the UI python class
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
import mantid
import mantidqtpython as mqt
from . import HfirPDReductionControl
# ----- default configuration ---------------
DEFAULT_SERVER = 'http://neutron.ornl.gov/user_data'
DEFAULT_INSTRUMENT = 'hb2a'
DEFAULT_WAVELENGTH = 2.4100
# -------------------------------------------
class EmptyError(Exception):
    """Raised when an integer or float input field turns out to be empty."""

    def __init__(self, value):
        """Keep the offending value around for reporting.

        :param value: the empty/invalid input that triggered the error
        """
        Exception.__init__(self)
        self.value = value

    def __str__(self):
        # Report the repr of the stored value, e.g. "''" for an empty string.
        return repr(self.value)
class MultiScanTabState(object):
    """Snapshot of the state of the multi-scan tab.

    compare_state() classifies the difference between two snapshots as one
    of NO_OPERATION, RELOAD_DATA or REDUCE_DATA so the GUI can decide how
    much work a change requires.
    """
    NO_OPERATION = 0
    RELOAD_DATA = 1
    REDUCE_DATA = 2

    def __init__(self):
        """ Initialization
        :return:
        """
        self._expNo = -1
        self._scanList = []
        self._xMin = None
        self._xMax = None
        self._binSize = 0
        self._unit = ''
        self._plotRaw = False
        self._useDetEfficiencyCorrection = False
        self._excludeDetectors = []

    def compare_state(self, tab_state):
        """ Compare this tab state and another tab state.

        :param tab_state: another MultiScanTabState instance
        :return: RELOAD_DATA when exp/scan selection differs, REDUCE_DATA
            when only reduction parameters differ, NO_OPERATION otherwise
        :raises NotImplementedError: if tab_state has the wrong type
        """
        if isinstance(tab_state, MultiScanTabState) is False:
            raise NotImplementedError('compare_state must have MultiScanTabStatus as input.')

        # BUG FIX: the original compared against the bound method
        # ``tab_state.getScanList`` (no parentheses), so the scan lists always
        # looked different and RELOAD_DATA was returned for identical states.
        if self._expNo != tab_state.getExpNumber() or self._scanList != tab_state.getScanList():
            return self.RELOAD_DATA

        # Any other attribute difference only requires re-reduction.
        for attname in self.__dict__.keys():
            if self.__getattribute__(attname) != tab_state.__getattribute__(attname):
                return self.REDUCE_DATA

        return self.NO_OPERATION

    def getExpNumber(self):
        """ Get experiment number
        :return:
        """
        return self._expNo

    def getScanList(self):
        """ Get the list of scans (as a copy, so callers cannot mutate state)
        :return:
        """
        return self._scanList[:]

    # pylint: disable=too-many-arguments
    def setup(self, exp_no, scan_list, min_x, max_x, bin_size, unit, raw, correct_det_eff, exclude_dets):
        """
        Set up the object
        :param exp_no: experiment number (coerced to int)
        :param scan_list: list of scan numbers
        :param min_x: lower bound of the binning range
        :param max_x: upper bound of the binning range
        :param bin_size: bin width (coerced to float)
        :param unit: output unit name (coerced to str)
        :param raw: whether raw data is plotted
        :param correct_det_eff: whether detector-efficiency correction applies
        :param exclude_dets: detectors to exclude
        :return:
        :raises NotImplementedError: if scan_list is not a list
        """
        self._expNo = int(exp_no)
        if isinstance(scan_list, list) is False:
            raise NotImplementedError('Scan_List must be list!')
        self._scanList = scan_list
        self._xMin = min_x
        self._xMax = max_x
        self._binSize = float(bin_size)
        self._unit = str(unit)
        self._plotRaw = raw
        self._useDetEfficiencyCorrection = correct_det_eff
        self._excludeDetectors = exclude_dets

        return
# pylint: disable=too-many-public-methods,too-many-branches,too-many-locals,too-many-statements
class MainWindow(QtGui.QMainWindow):
""" Class of Main Window (top)
"""
# Copy to ui.setupUI
# # Version 3.0 + Import for ui_MainWindow.py
# from MplFigureCanvas import Qt4MplCanvas
# # Replace 'self.graphicsView = QtGui.QtGraphicsView' with the following
# self.graphicsView = Qt4MplCanvas(self.centralwidget)
# self.mainplot = self.graphicsView.getPlot()
def __init__(self, parent=None):
    """ Initialization and set up.

    Builds the Qt Designer UI, wires every widget signal to its handler,
    installs numeric validators on the line edits, applies the default
    configuration, and initializes the reduction-control back end and
    all per-tab bookkeeping state.

    :param parent: optional parent widget, forwarded to QMainWindow
    """
    # Base class
    QtGui.QMainWindow.__init__(self, parent)

    # UI Window (from Qt Designer)
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)

    # Define gui-event handling
    # menu
    # NOTE(review): 'doExist' looks like a typo for 'doExit'; the handler is
    # kept as named since renaming would break this connection.
    self.connect(self.ui.actionQuit, QtCore.SIGNAL('triggered()'),
                 self.doExist)
    self.connect(self.ui.actionFind_Help, QtCore.SIGNAL('triggered()'),
                 self.doHelp)

    # main
    self.connect(self.ui.comboBox_wavelength, QtCore.SIGNAL('currentIndexChanged(int)'),
                 self.doUpdateWavelength)
    self.connect(self.ui.pushButton_browseExcludedDetFile, QtCore.SIGNAL('clicked()'),
                 self.doBrowseExcludedDetetorFile)
    self.connect(self.ui.checkBox_useDetExcludeFile, QtCore.SIGNAL('stateChanged(int)'),
                 self.do_enable_excluded_dets)

    # tab 'Raw Detectors'
    self.connect(self.ui.pushButton_plotRaw, QtCore.SIGNAL('clicked()'),
                 self.doPlotRawPtMain)
    self.connect(self.ui.pushButton_ptUp, QtCore.SIGNAL('clicked()'),
                 self.do_plot_raw_pt_prev)
    self.connect(self.ui.pushButton_ptDown, QtCore.SIGNAL('clicked()'),
                 self.doPlotRawPtNext)
    self.connect(self.ui.pushButton_clearRawDets, QtCore.SIGNAL('clicked()'),
                 self.doClearRawDetCanvas)

    # tab 'Individual Detectors'
    self.connect(self.ui.pushButton_plotIndvDet, QtCore.SIGNAL('clicked()'),
                 self.doPlotIndvDetMain)
    self.connect(self.ui.pushButton_plotPrevDet, QtCore.SIGNAL('clicked()'),
                 self.doPlotIndvDetPrev)
    self.connect(self.ui.pushButton_plotNextDet, QtCore.SIGNAL('clicked()'),
                 self.doPlotIndvDetNext)
    self.connect(self.ui.pushButton_clearCanvasIndDet, QtCore.SIGNAL('clicked()'),
                 self.doClearIndDetCanvas)
    self.connect(self.ui.pushButton_plotLog, QtCore.SIGNAL('clicked()'),
                 self.do_plot_sample_log)

    # tab 'Normalized'
    self.connect(self.ui.pushButton_loadData, QtCore.SIGNAL('clicked()'),
                 self.doLoadData)
    self.connect(self.ui.pushButton_prevScan, QtCore.SIGNAL('clicked()'),
                 self.doLoadReduceScanPrev)
    self.connect(self.ui.pushButton_nextScan, QtCore.SIGNAL('clicked()'),
                 self.doLoadReduceScanNext)
    self.connect(self.ui.pushButton_unit2theta, QtCore.SIGNAL('clicked()'),
                 self.doReduce2Theta)
    self.connect(self.ui.pushButton_unitD, QtCore.SIGNAL('clicked()'),
                 self.doReduceDSpacing)
    self.connect(self.ui.pushButton_unitQ, QtCore.SIGNAL('clicked()'),
                 self.doReduceQ)
    self.connect(self.ui.pushButton_saveData, QtCore.SIGNAL('clicked()'),
                 self.doSaveData)
    self.connect(self.ui.pushButton_clearTab2Canvas, QtCore.SIGNAL('clicked()'),
                 self.doClearCanvas)

    # tab 'Multiple Scans'
    self.connect(self.ui.pushButton_loadMultData, QtCore.SIGNAL('clicked()'),
                 self.doLoadSetData)
    self.connect(self.ui.pushButton_mscanBin, QtCore.SIGNAL('clicked()'),
                 self.doReduceSetData)
    self.connect(self.ui.pushButton_mergeScans, QtCore.SIGNAL('clicked()'),
                 self.doMergeScans)
    self.connect(self.ui.pushButton_viewMScan1D, QtCore.SIGNAL('clicked()'),
                 self.doMergeScanView1D)
    self.connect(self.ui.pushButton_view2D, QtCore.SIGNAL('clicked()'),
                 self.doMergeScanView2D)
    self.connect(self.ui.pushButton_viewMerge, QtCore.SIGNAL('clicked()'),
                 self.doMergeScanViewMerged)
    self.connect(self.ui.pushButton_clearMultCanvas, QtCore.SIGNAL('clicked()'),
                 self.doClearMultiRunCanvas)
    self.connect(self.ui.pushButton_saveAllIndScans, QtCore.SIGNAL('clicked()'),
                 self.doSaveMultipleScans)
    self.connect(self.ui.pushButton_saveMerge, QtCore.SIGNAL('clicked()'),
                 self.doSaveMergedScan)
    self.connect(self.ui.pushButton_plotRawMultiScans, QtCore.SIGNAL('clicked()'),
                 self.do_convert_plot_multi_scans)

    # tab 'Vanadium'
    self.connect(self.ui.pushButton_stripVanPeaks, QtCore.SIGNAL('clicked()'),
                 self.doStripVandiumPeaks)
    self.connect(self.ui.pushButton_saveVanRun, QtCore.SIGNAL('clicked()'),
                 self.doSaveVanRun)
    self.connect(self.ui.pushButton_rebin2Theta, QtCore.SIGNAL('clicked()'),
                 self.doReduceVanadium2Theta)
    self.connect(self.ui.pushButton_smoothVanData, QtCore.SIGNAL('clicked()'),
                 self.doSmoothVanadiumData)
    self.connect(self.ui.pushButton_applySmooth, QtCore.SIGNAL('clicked()'),
                 self.doSmoothVanadiumApply)
    self.connect(self.ui.pushButton_undoSmooth, QtCore.SIGNAL('clicked()'),
                 self.doSmoothVanadiumUndo)

    # tab 'Advanced Setup'
    self.connect(self.ui.pushButton_browseCache, QtCore.SIGNAL('clicked()'),
                 self.doBrowseCache)
    self.connect(self.ui.radioButton_useServer, QtCore.SIGNAL('clicked()'),
                 self.doChangeSrcLocation)
    self.connect(self.ui.radioButton_useLocal, QtCore.SIGNAL('clicked()'),
                 self.doChangeSrcLocation)
    self.connect(self.ui.pushButton_browseLocalSrc, QtCore.SIGNAL('clicked()'),
                 self.doBrowseLocalDataSrc)
    self.connect(self.ui.pushButton_chkServer, QtCore.SIGNAL('clicked()'),
                 self.doCheckSrcServer)

    # Define signal-event handling
    # define event handlers for matplotlib canvas
    self.ui.graphicsView_mergeRun.canvas.mpl_connect('button_press_event',
                                                     self.on_mouseDownEvent)
    self.ui.graphicsView_mergeRun.canvas.mpl_connect('motion_notify_event',
                                                     self.on_mouseMotion)

    # Widget type definition: numeric validators for all free-text inputs
    validator0 = QtGui.QIntValidator(self.ui.lineEdit_expNo)
    validator0.setBottom(1)
    self.ui.lineEdit_expNo.setValidator(validator0)

    validator1 = QtGui.QIntValidator(self.ui.lineEdit_expNo)
    validator1.setBottom(1)
    self.ui.lineEdit_scanNo.setValidator(validator1)

    validator2 = QtGui.QDoubleValidator(self.ui.lineEdit_wavelength)
    validator2.setBottom(0.)
    self.ui.lineEdit_wavelength.setValidator(validator2)

    validator3 = QtGui.QDoubleValidator(self.ui.lineEdit_xmin)
    validator3.setBottom(0.)
    self.ui.lineEdit_xmin.setValidator(validator3)

    validator4 = QtGui.QDoubleValidator(self.ui.lineEdit_xmax)
    validator4.setBottom(0.)
    self.ui.lineEdit_xmax.setValidator(validator4)

    validator5 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize)
    validator5.setBottom(0.)
    self.ui.lineEdit_binsize.setValidator(validator5)

    # NOTE(review): Pt number and detector ID are integers but use
    # QDoubleValidator here -- presumably intentional leniency; verify.
    validator6 = QtGui.QDoubleValidator(self.ui.lineEdit_ptNo)
    validator6.setBottom(0)
    self.ui.lineEdit_ptNo.setValidator(validator6)

    validator7 = QtGui.QDoubleValidator(self.ui.lineEdit_detID)
    validator7.setBottom(0)
    self.ui.lineEdit_detID.setValidator(validator7)

    validator8 = QtGui.QDoubleValidator(self.ui.lineEdit_min2Theta)
    validator8.setBottom(0.)
    self.ui.lineEdit_min2Theta.setValidator(validator8)

    validator9 = QtGui.QDoubleValidator(self.ui.lineEdit_max2Theta)
    validator9.setBottom(0.)
    self.ui.lineEdit_max2Theta.setValidator(validator9)

    validator10 = QtGui.QDoubleValidator(self.ui.lineEdit_binsize2Theta)
    validator10.setBottom(0.)
    self.ui.lineEdit_binsize2Theta.setValidator(validator10)

    validator11 = QtGui.QIntValidator(self.ui.lineEdit_scanStart)
    validator11.setBottom(1)
    self.ui.lineEdit_scanStart.setValidator(validator11)

    validator12 = QtGui.QIntValidator(self.ui.lineEdit_scanEnd)
    validator12.setBottom(1)
    self.ui.lineEdit_scanEnd.setValidator(validator12)

    validator13 = QtGui.QDoubleValidator(self.ui.lineEdit_normalizeMonitor)
    validator13.setBottom(0.)
    self.ui.lineEdit_normalizeMonitor.setValidator(validator13)

    validator14 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMinX)
    validator14.setBottom(0.)
    self.ui.lineEdit_mergeMinX.setValidator(validator14)

    validator15 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeMaxX)
    validator15.setBottom(0.)
    self.ui.lineEdit_mergeMaxX.setValidator(validator15)

    validator16 = QtGui.QDoubleValidator(self.ui.lineEdit_mergeBinSize)
    validator16.setBottom(0.)
    self.ui.lineEdit_mergeBinSize.setValidator(validator16)

    # Get initial setup
    # RELEASE 2.0 - This part will be implemented soon as default configuration is made
    # Mantid configuration
    self._instrument = str(self.ui.comboBox_instrument.currentText())

    # UI widgets setup
    self.ui.comboBox_outputFormat.addItems(['Fullprof'])  # Supports Fullprof only now, 'GSAS', 'Fullprof+GSAS'])

    # RELEASE 2.0 : Need to disable some widgets... consider to refactor the code
    self.ui.radioButton_useServer.setChecked(True)
    self.ui.radioButton_useLocal.setChecked(False)
    self.ui.checkBox_useDetExcludeFile.setChecked(True)

    self.ui.comboBox_wavelength.setCurrentIndex(0)
    self.ui.lineEdit_wavelength.setText('2.41')

    self.ui.pushButton_unit2theta.setText(r'$2\theta$')

    # vanadium spectrum smooth parameters
    self.ui.lineEdit_smoothParams.setText('20,2')

    # Set up data source
    self._serverAddress = DEFAULT_SERVER
    self._srcFromServer = True
    self._localSrcDataDir = None
    self._srcAtLocal = False

    self._currUnit = '2theta'

    # Workspaces
    self._myControl = HfirPDReductionControl.HFIRPDRedControl()

    # Interactive graphics
    self._viewMerge_X = None
    self._viewMerge_Y = None

    # Control of plots: key = canvas, value = list of 2-integer-tuple (expno, scanno)
    # NOTE(review): keys are mixed -- tab indices (int) here, canvas objects
    # elsewhere in this class (see doClearIndDetCanvas / doClearRawDetCanvas).
    self._tabLineDict = {}
    self._tabBinParamDict = {}
    for key in [2]:
        self._tabLineDict[key] = []
    for key in [2, 3, 4]:
        self._tabBinParamDict[key] = [None, None, None]

    self._lastMergeLabel = ""
    self._lastMergeIndex = -1

    self._expNo = None
    self._scanNo = None
    self._detID = None
    self._indvXLabel = None

    self._rawDetExpNo = None
    self._rawDetScanNo = None
    self._rawDetPlotMode = None
    self._rawDetPtNo = None

    self._indvDetCanvasMode = 'samplelog'

    # Multiple scan tab
    self._multiScanExp = None
    self._multiScanList = []

    # help: external Qt Assistant process plus doc locations
    self.assistantProcess = QtCore.QProcess(self)
    # pylint: disable=protected-access
    self.collectionFile = os.path.join(mantid._bindir, '../docs/qthelp/MantidProject.qhc')
    version = ".".join(mantid.__version__.split(".")[:2])
    self.qtUrl = 'qthelp://org.sphinx.mantidproject.' + version + '/doc/interfaces/HFIR Powder Reduction.html'
    self.externalUrl = 'http://docs.mantidproject.org/nightly/interfaces/HFIR Powder Reduction.html'

    # Initial setup for tab: fall back to the home directory when the cache
    # directory is unset or missing.
    self.ui.tabWidget.setCurrentIndex(0)
    cache_dir = str(self.ui.lineEdit_cache.text()).strip()
    if len(cache_dir) == 0 or os.path.exists(cache_dir) is False:
        invalid_cache = cache_dir
        cache_dir = os.path.expanduser('~')
        self.ui.lineEdit_cache.setText(cache_dir)
        if len(invalid_cache) == 0:
            warning_msg = 'Cache directory is not set. '
        else:
            warning_msg = 'Cache directory {0} does not exist. '.format(invalid_cache)
        warning_msg += 'Using {0} for caching dowloaded file instead.'.format(cache_dir)
        print ('[WARNING] {0}'.format(warning_msg))

    # Get on hold of raw data file: exactly one of server/local must be active
    useserver = self.ui.radioButton_useServer.isChecked()
    uselocal = self.ui.radioButton_useLocal.isChecked()
    if useserver == uselocal:
        self._logWarning("It is logically wrong to set up (1) neither server or local dir to "
                         "access data or (2) both server and local dir to retrieve data. "
                         "As default, it is set up to download data from server.")
        useserver = True
        uselocal = False
        self.ui.radioButton_useServer.setChecked(True)
        self.ui.radioButton_useLocal.setChecked(False)

    # register startup
    mantid.UsageService.registerFeatureUsage("Interface", "HfirPowderReduction", False)

    return
# -- Event Handling ----------------------------------------------------
def doBrowseCache(self):
    """Pop a directory dialog so the user can choose where downloaded
    data files are cached."""
    # Start browsing from the configured cache directory when it is a
    # real directory, otherwise from the current working directory.
    configured = str(self.ui.lineEdit_cache.text()).strip()
    if len(configured) > 0 and os.path.exists(configured):
        start_dir = configured
    else:
        start_dir = os.getcwd()

    # Ask the user to pick a directory.
    selected = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory', start_dir))

    # Only update the widget when the selection actually changed.
    if selected != start_dir:
        self.ui.lineEdit_cache.setText(selected)

    return
def doBrowseExcludedDetetorFile(self):
    """Browse for an excluded-detector file, parse it, and show the
    detector IDs in the GUI.

    Return :: None
    """
    # Let the user pick the exclusion file, starting from the working dir.
    filefilter = "Text (*.txt);;Data (*.dat);;All files (*)"
    selected_names = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', os.getcwd(), filefilter)
    try:
        chosen_file = selected_names[0]
        self.ui.lineEdit_excludedDetFileName.setText(chosen_file)
    except IndexError:
        # nothing selected: leave the current state untouched
        return

    # Parse the exclusion file; surface any parser error to the log.
    print("Detector exclusion file name is %s." % (chosen_file))
    excluded_ids, errmsg = self._myControl.parseExcludedDetFile('HB2A', chosen_file)
    if len(errmsg) > 0:
        self._logError(errmsg)

    # Display the IDs as a comma-separated list (empty string if none).
    self.ui.lineEdit_detExcluded.setText(",".join("%d" % det_id for det_id in excluded_ids))
def doBrowseLocalDataSrc(self):
    """Browse the local data storage location (placeholder: not implemented)."""
    QtGui.QMessageBox.information(
        self, "Click!", "Browse local data storage location. Implement ASAP")
def doChangeSrcLocation(self):
    """React to a change of the data-source radio buttons, enabling the
    widgets of the chosen source and disabling the other set."""
    use_server = self.ui.radioButton_useServer.isChecked()
    use_local = self.ui.radioButton_useLocal.isChecked()

    print("Use Server: ", use_server)
    print("Use Local : ", use_local)

    # Exactly one of the two radio buttons must be active.
    if use_server == use_local:
        raise NotImplementedError("Impossible for radio buttons")

    self._srcAtLocal = use_local
    self._srcFromServer = use_server

    # Since use_server == (not use_local), one setDisabled flag drives both
    # branches of the original if/else.
    self.ui.lineEdit_dataIP.setDisabled(use_local)
    self.ui.pushButton_chkServer.setDisabled(use_local)
    self.ui.lineEdit_localSrc.setDisabled(use_server)
    self.ui.pushButton_browseLocalSrc.setDisabled(use_server)
def doCheckSrcServer(self):
    """Check the source data server's availability (placeholder: not implemented)."""
    QtGui.QMessageBox.information(
        self, "Click!", "Check source data server! Implement ASAP")
def doClearCanvas(self):
    """Clear the reduced-data canvas, but only when the 'Normalized' tab
    (index 2) is the active one."""
    active_tab = self.ui.tabWidget.currentIndex()
    if active_tab != 2:
        return
    # Wipe the plot and forget the lines recorded for this tab.
    self.ui.graphicsView_reducedData.clearAllLines()
    self._tabLineDict[active_tab] = []
def doClearIndDetCanvas(self):
    """Clear the 'Individual Detector' canvas, drop its recorded lines and
    restart the line colour/style rotation."""
    canvas = self.ui.graphicsView_indvDet
    # Wipe the plot itself.
    canvas.clearAllLines()
    # Forget the bookkeeping entries kept for this canvas, if any.
    if canvas in self._tabLineDict:
        self._tabLineDict[canvas] = []
    # Reset the colour schedule so new plots start fresh.
    canvas.resetLineColorStyle()
def doClearMultiRunCanvas(self):
    """Clear the 'Multiple Run' canvas.

    clearCanvas() is used (rather than clearing lines) because this canvas
    shows both 1D curves and 2D images, and removing lines alone would
    leave a 2D image behind.
    """
    self.ui.graphicsView_mergeRun.clearCanvas()
def doClearRawDetCanvas(self):
    """Clear the 'Raw Detector' canvas; removing lines is sufficient here."""
    raw_canvas = self.ui.graphicsView_Raw
    raw_canvas.clearAllLines()
    # Reset the recorded lines for this canvas.
    self._tabLineDict[raw_canvas] = []
def doClearVanadiumCanvas(self):
    """Remove every plotted line from the 'Vanadium' tab canvas."""
    self.ui.graphicsView_vanPeaks.clearAllLines()
def doExist(self):
    """ Exist the application.

    NOTE(review): the name/docstring look like a typo for 'Exit'; the method
    is wired to actionQuit in __init__, so it is left as named.
    """
    # Optionally delete cached files before closing the window.
    clearcache = self.ui.checkBox_delCache.isChecked()

    if clearcache:
        # NOTE(review): the stdlib urllib module has no 'delAllFile' and
        # 'self._cache' is never assigned anywhere visible in this file --
        # this branch likely raises AttributeError; verify before relying on it.
        urllib.delAllFile(self._cache)

    self.close()
def doHelp(self):
    """ Show help.
    Copied from DGSPlanner.

    Tries the MantidPlot in-application help first; when MantidPlot is not
    importable, falls back to launching Qt Assistant with the local help
    collection, and finally to opening the online documentation.
    """
    try:
        # Preferred path: MantidPlot's custom interface help viewer.
        import pymantidplot
        pymantidplot.proxies.showCustomInterfaceHelp('HFIR Powder Reduction')
    except ImportError:
        # Fallback: Qt Assistant pointed at the local collection file.
        self.assistantProcess.close()
        self.assistantProcess.waitForFinished()
        helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
        helpapp += 'assistant'
        args = ['-enableRemoteControl', '-collectionFile', self.collectionFile, '-showUrl', self.qtUrl]
        if os.path.isfile(helpapp) and os.path.isfile(self.collectionFile):
            self.assistantProcess.close()
            self.assistantProcess.waitForFinished()
            self.assistantProcess.start(helpapp, args)
        else:
            # Last resort: open the nightly online docs in the system browser.
            mqt.MantidQt.API.MantidDesktopServices.openUrl(QtCore.QUrl(self.externalUrl))
def _load_spice_data_to_raw_table(self, exp_no, scan_no, data_file_name):
    # flake8: noqa
    """Load one SPICE data file into the raw table.

    Returns a (success, message) pair; the message is empty on success and
    carries the failure reason otherwise.
    """
    try:
        loaded = self._myControl.loadSpicePDData(exp_no, scan_no, data_file_name)
    except NotImplementedError as ne:
        return False, str(ne)
    if loaded:
        return loaded, ""
    return loaded, "Load data failed."
def _get_corr_file_names_and_wavelength(self, exp_no, scan_no, data_file_name):
    # Obtain the correction file names and wavelength from SPICE file
    """Retrieve correction-file names and wavelength for one exp/scan.

    :param exp_no: experiment number
    :param scan_no: scan number
    :param data_file_name: path of the local SPICE data file; its directory
        is searched for correction data
    :return: 5-tuple (auto_wavelength, van_corr_filename, excl_det_filename,
        wavelength_error, err_msg); the first three are None on failure
    """
    wavelength_error = False
    err_msg = ""
    local_dir = os.path.dirname(data_file_name)
    try:
        status, return_body = self._myControl.retrieveCorrectionData(instrument='HB2A',
                                                                     exp=exp_no, scan=scan_no,
                                                                     localdatadir=local_dir)
    except NotImplementedError as e:
        err_msg = str(e)
        if err_msg.count('m1') > 0:
            # error is about wavelength ('m1' presumably being the
            # wavelength-related sample log -- TODO confirm)
            status = False
            wavelength_error = True
        else:
            # other error
            raise e
    if status:
        # return_body layout: [wavelength, vanadium correction file,
        # excluded-detector file] -- push file names into the GUI when found
        auto_wavelength = return_body[0]
        van_corr_filename = return_body[1]
        excl_det_filename = return_body[2]
        if van_corr_filename is not None:
            self.ui.lineEdit_vcorrFileName.setText(van_corr_filename)
        if excl_det_filename is not None:
            self.ui.lineEdit_excludedDetFileName.setText(excl_det_filename)
    else:
        auto_wavelength = None
        van_corr_filename = None
        excl_det_filename = None
    return auto_wavelength, van_corr_filename, excl_det_filename, wavelength_error, err_msg
def _set_wavelength(self, auto_wavelength, wavelength_error, exp_no, scan_no, err_msg):
    """Push the wavelength (from SPICE data or the GUI) into the widgets
    and the reduction controller.

    :param auto_wavelength: wavelength read from SPICE data, or None
    :param wavelength_error: True when the SPICE lookup failed on wavelength
    :param exp_no: experiment number
    :param scan_no: scan number
    :param err_msg: message shown in the wavelength box on error
    """
    if auto_wavelength is None:
        # unable to get wavelength from SPICE data
        # index 4 is presumably the free-text/'other' combo entry -- TODO confirm
        self.ui.comboBox_wavelength.setCurrentIndex(4)
        if wavelength_error:
            self.ui.lineEdit_wavelength.setText(err_msg)
        else:
            self.ui.lineEdit_wavelength.setText(self.ui.comboBox_wavelength.currentText())
        self._myControl.setWavelength(exp_no, scan_no, wavelength=None)
    else:
        # get wavelength from SPICE data. set value to GUI
        self.ui.lineEdit_wavelength.setText(str(auto_wavelength))
        # match against the instrument's known wavelengths with 0.01 tolerance
        allowed_wavelengths = [2.41, 1.54, 1.12]
        num_items = self.ui.comboBox_wavelength.count()
        good = False
        # NOTE(review): assumes num_items - 1 <= len(allowed_wavelengths);
        # otherwise allowed_wavelengths[ic] raises IndexError -- verify
        for ic in range(num_items - 1):
            if abs(auto_wavelength - allowed_wavelengths[ic]) < 0.01:
                good = True
                self.ui.comboBox_wavelength.setCurrentIndex(ic)
        if not good:
            # no match: select the last ('other') entry
            self.ui.comboBox_wavelength.setCurrentIndex(num_items - 1)
        self._myControl.setWavelength(exp_no, scan_no, wavelength=auto_wavelength)
def _get_and_parse_det_efficiency_file(self, van_corr_filename):
    """Obtain and parse the vanadium detector-efficiency correction file.

    :param van_corr_filename: correction file from the SPICE lookup, or
        None to prompt the user for one
    :return: detector-efficiency workspace, or None when correction is
        disabled, no file was chosen, or parsing failed
    """
    if self.ui.checkBox_useDetEffCorr.isChecked():
        # Apply detector efficiency correction
        if van_corr_filename is None:
            # browse vanadium correction file
            file_filter = "Text (*.txt);;Data (*.dat);;All files (*)"
            current_dir = os.getcwd()
            van_corr_filenames = QtGui.QFileDialog.getOpenFileNames(self, 'Open File(s)', current_dir, file_filter)
            if len(van_corr_filenames) > 0:
                van_corr_filename = van_corr_filenames[0]
                self.ui.lineEdit_vcorrFileName.setText(str(van_corr_filename))
            else:
                # user cancelled: disable the correction checkbox again
                self._logError("User does not specify any vanadium correction file.")
                self.ui.checkBox_useDetEffCorr.setChecked(False)
        # Parse if it is not None
        if van_corr_filename is not None:
            detector_efficiency_ws, err_msg = self._myControl.parseDetEffCorrFile('HB2A', van_corr_filename)
            if detector_efficiency_ws is None:
                print("Parsing detectors efficiency file error: {0}.".format(err_msg))
                return None
            else:
                return detector_efficiency_ws
        else:
            return None
    else:
        # Not chosen to apply detector efficiency correction
        return None
def _parse_spice_data_to_MDEventWS(self, detector_efficiency_table, exp_no, scan_no):
    """Parse loaded SPICE data into MDEventWorkspaces.

    Returns a (status, message) pair; the message is empty when parsing
    succeeded and carries the failure reason otherwise.
    """
    try:
        print("Det Efficiency Table WS: ", str(detector_efficiency_table))
        status = self._myControl.parseSpiceData(exp_no, scan_no, detector_efficiency_table)
    except NotImplementedError as e:
        return False, str(e)
    if status:
        return status, ""
    return status, "Parse data failed."
def _parse_detector_exclusion_file(self, exclude_detector_filename):
    """Parse the detector-exclusion file (when one is given) and display
    the detector IDs as a comma-separated list in the GUI."""
    if exclude_detector_filename is None:
        return
    detector_ids, err_msg = self._myControl.parseExcludedDetFile('HB2A', exclude_detector_filename)
    # join produces '' for an empty list, matching the original behavior
    id_text = ",".join("{0}".format(det_id) for det_id in detector_ids)
    self.ui.lineEdit_detExcluded.setText(id_text)
def doLoadData(self, exp=None, scan=None):
    """ Load and reduce data
    It does not support for tab 'Advanced Setup'
    For tab 'Raw Detector' and 'Individual Detector', this method will load data to MDEventWorkspaces
    For tab 'Normalized' and 'Vanadium', this method will load data to MDEVentWorkspaces but NOT reduce to single spectrum

    :param exp: optional experiment number (int); read from the GUI otherwise
    :param scan: optional scan number (int); read from the GUI otherwise
    :return: True on success; None/False when loading is aborted
    """
    # Kick away unsupported tabs
    i_tab = self.ui.tabWidget.currentIndex()
    tab_text = str(self.ui.tabWidget.tabText(i_tab))
    print("[DB] Current active tab is No. {0} as {1}.".format(i_tab, tab_text))

    # Rule out unsupported tab
    if i_tab == 5:
        # 'advanced'
        msg = "Tab {0} does not support 'Load Data'. Request is ambiguous.".format(tab_text)
        QtGui.QMessageBox.information(self, "Click!", msg)
        return

    # Get exp number and scan number
    if isinstance(exp, int) and isinstance(scan, int):
        # use input
        exp_no = exp
        scan_no = scan
    else:
        # read from GUI
        try:
            exp_no, scan_no = self._uiGetExpScanNumber()
            self._logDebug("Attending to load Exp {0} Scan {1}.".format(exp_no, scan_no))
        except NotImplementedError as ne:
            self._logError("Error to get Exp and Scan due to {0}.".format(str(ne)))
            return

    # Form data file name and download data
    status, data_filename = self._uiDownloadDataFile(exp=exp_no, scan=scan_no)
    if not status:
        self._logError("Unable to download or locate local data file for Exp {0} Scan {1}.".format(exp_no, scan_no))
        # BUG FIX: previously execution continued with an unusable file name
        return False

    # (Load data for tab 0, 1, 2 and 4)
    if i_tab not in [0, 1, 2, 3, 4]:
        # Unsupported Tabs: programming error!
        err_msg = "{0}-th tab should not get this far.\n".format(i_tab)
        err_msg += 'GUI has been changed, but the change has not been considered! iTab = {0}'.format(i_tab)
        raise NotImplementedError(err_msg)

    # Load SPICE data to raw table (step 1)
    load_success, msg = self._load_spice_data_to_raw_table(exp_no, scan_no, data_filename)
    if not load_success:
        self._logError(msg)
        return

    # Obtain the correction file names and wavelength from SPICE file
    # BUG FIX: this used to call _load_spice_data_to_raw_table() a second
    # time, whose 2-tuple result cannot be unpacked into these five names.
    (auto_wavelength, van_corr_filename, exclude_detector_filename, wavelength_error, err_msg) \
        = self._get_corr_file_names_and_wavelength(exp_no, scan_no, data_filename)

    # Set wavelength to GUI except 'multiple scans'
    self._set_wavelength(auto_wavelength, wavelength_error, exp_no, scan_no, err_msg)

    # Optionally obtain and parse det effecient file
    detector_efficiency_table_ws = self._get_and_parse_det_efficiency_file(van_corr_filename)

    # Parse SPICE data to MDEventWorkspaces
    success, msg = self._parse_spice_data_to_MDEventWS(detector_efficiency_table_ws, exp_no, scan_no)
    if not success:
        self._logError(msg)
        return

    # Optionally parse detector exclusion file and set to line text
    self._parse_detector_exclusion_file(exclude_detector_filename)

    # Set up some widgets for raw detector data. Won't be applied to tab 3
    if i_tab != 3:
        float_sample_log_name_list = self._myControl.getSampleLogNames(exp_no, scan_no)
        self.ui.comboBox_indvDetXLabel.clear()
        self.ui.comboBox_indvDetXLabel.addItem("2theta/Scattering Angle")
        self.ui.comboBox_indvDetXLabel.addItems(float_sample_log_name_list)
        self.ui.comboBox_indvDetYLabel.clear()
        self.ui.comboBox_indvDetYLabel.addItems(float_sample_log_name_list)

    return True
def doLoadSetData(self):
    """ Load a set of data
    This is the first step of doing multiple scans processing

    Loads every scan in the user-specified list, reports progress in the
    merge-message label, and restricts the unit box to '2theta' when any
    scan lacks a wavelength.
    """
    # Get inputs for exp number and scans
    try:
        rtup = self._uiGetExpScanTabMultiScans()
        expno = rtup[0]
        scanlist = rtup[1]
    except NotImplementedError as nie:
        self._logError("Unable to load data set in multiple scans due to %s." % (str(nie)))
        # BUG FIX: without this return the loop below used the undefined
        # names 'expno'/'scanlist' and raised UnboundLocalError.
        return

    # Load and reduce data
    loadstatus = True
    for scan in sorted(scanlist):
        tempstatus = self.doLoadData(expno, scan)
        if not tempstatus:
            self.ui.label_mergeMessage.setText('Error to load Exp %d Scan %d.' % (expno, scan))
            loadstatus = False
        else:
            message = 'Loaded Exp %d Scan %d.' % (expno, scan)
            self.ui.label_mergeMessage.setText(message)

    # Load status
    if loadstatus:
        self.ui.label_mergeMessage.setText('All data files are loaded')
    else:
        self.ui.label_mergeMessage.setText('Not all data files are loaded')

    # Wave length: all scans must have one to offer converted units
    haswavelength = True
    for scan in scanlist:
        if self._myControl.getWavelength(expno, scan) is None:
            self._logNotice("Exp %d Scan %d has no wavelength set up." % (expno, scan))
            haswavelength = False
            break

    # Set unit box
    if haswavelength:
        self.ui.comboBox_mscanUnit.clear()
        self.ui.comboBox_mscanUnit.addItems(['2theta', 'dSpacing', 'Momentum Transfer (Q)'])
    else:
        self.ui.comboBox_mscanUnit.clear()
        self.ui.comboBox_mscanUnit.addItems(['2theta'])

    return
def doLoadReduceScanPrev(self):
    """ Load and reduce previous scan for tab 'Normalized'

    Decrements the scan number in the GUI (stopping at 1), reloads the
    data and re-reduces it in the current unit.
    """
    # Reduce scan number by 1
    try:
        scanno = int(self.ui.lineEdit_scanNo.text())
    except ValueError:
        self._logError("Either Exp No or Scan No is not set up right as integer.")
        return
    else:
        scanno = scanno - 1
        if scanno < 1:
            self._logWarning("Scan number is 1 already. Cannot have previous scan")
            return

    # Load data
    # FIX: the original called setText with the same value twice in a row;
    # one call is sufficient.
    self.ui.lineEdit_scanNo.setText(str(scanno))
    self.doLoadData()

    # Reduce data
    self._uiReducePlotNoramlized(self._currUnit)
def doLoadReduceScanNext(self):
    """ Load and reduce next scan for tab 'Normalized'

    Increments the scan number in the GUI, reloads the data and
    re-reduces it in the current unit.
    :return: False when the scan number is invalid
    """
    # Advance scan number by 1
    try:
        scanno = int(self.ui.lineEdit_scanNo.text())
    except ValueError:
        self._logError("Either Exp No or Scan No is not set up right as integer.")
        return False
    else:
        scanno = scanno + 1
        if scanno < 1:
            # BUG FIX: the warning used to be the copy-pasted 'previous scan'
            # message, which is wrong when advancing forward.
            self._logWarning("Scan number is less than 1. Cannot advance to next scan")
            return False

    # Load data
    self.ui.lineEdit_scanNo.setText(str(scanno))
    execstatus = self.doLoadData()
    print("[DB] Load data : ", execstatus)

    # Reduce data
    self._uiReducePlotNoramlized(self._currUnit)
def doMergeScans(self):
    """ Merge several scans for tab 'merge'.

    Reads the exp/scan selection from the GUI, checks the scans'
    wavelengths, merges and reduces them, then plots the merged result.
    :return: False on bad exp/scan input, None otherwise
    """
    # Get exp number and list of scans
    try:
        r = self._uiGetExpScanTabMultiScans()
        expno = r[0]
        scanlist = r[1]
    except NotImplementedError as ne:
        self._logError(str(ne))
        return False

    # Check whether the wavelengths are same to merge
    try:
        wl_list = []
        for scanno in scanlist:
            print("Exp %d Scan %d. Wavelength = %s." % (
                expno, scanno, str(self._myControl.getWavelength(expno, scanno))))
            wl_list.append(float(self._myControl.getWavelength(expno, scanno)))

        wl_list = sorted(wl_list)
        min_wl = wl_list[0]
        max_wl = wl_list[-1]
        # NOTE(review): a spread of 1.0 is a very loose tolerance and only
        # produces a warning, not an abort -- confirm this is intended
        if max_wl - min_wl > 1.0:
            self._logWarning("All scans do not have same wavelengths!")
    except TypeError:
        # getWavelength returned None for some scan; float(None) raises TypeError
        self._logError('Not all scans have wavelength set up. Unable to merge scans.')
        return

    # Check!
    try:
        unit = str(self.ui.comboBox_mscanUnit.currentText())
        xmin, binsize, xmax = self._uiGetBinningParams(itab=3)
        # wavelength = min_wl
        mindex = self._myControl.mergeReduceSpiceData(expno, scanlist, unit, xmin, xmax, binsize)
    except Exception as e:
        # NOTE(review): re-raising unchanged makes this try/except a no-op;
        # presumably a placeholder for real error handling
        raise e

    label = "Exp %d, Scan %s." % (expno, str(scanlist))
    self._plotMergedReducedData(mindex, label)
    # remember the last merge so the 'View Merged' button can re-plot it
    self._lastMergeIndex = mindex
    self._lastMergeLabel = label

    return
def doMergeScanView1D(self):
    """ Change the multiple runs to 1D.

    Plots each selected scan as its own 1D curve on the merge canvas and
    toggles the 1D/2D view buttons accordingly.
    :return: False when the exp/scan input cannot be parsed
    """
    # Highlight the button's color
    self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: yellow; color: red;}')
    self.ui.pushButton_view2D.setEnabled(True)
    self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: white; color: gray;}')
    self.ui.pushButton_viewMScan1D.setEnabled(False)

    # Process input experiment number and scan list
    try:
        r = self._uiGetExpScanTabMultiScans()
        expno = r[0]
        scanlist = r[1]
    except NotImplementedError as e:
        self._logError(str(e))
        return False

    # Clear image
    canvas = self.ui.graphicsView_mergeRun
    canvas.clearAllLines()
    canvas.clearCanvas()

    # Plot data: one labelled curve per scan, accumulated on the canvas
    unit = str(self.ui.comboBox_mscanUnit.currentText())
    xlabel = self._getXLabelFromUnit(unit)

    for scanno in scanlist:
        label = "Exp %s Scan %s" % (str(expno), str(scanno))
        self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False)
def doMergeScanView2D(self):
""" Change the merged run's view to 2D plot
"""
# Highlight button color and change the color of another one
self.ui.pushButton_view2D.setStyleSheet('QPushButton {background-color: white; color: gray;}')
self.ui.pushButton_view2D.setEnabled(False)
self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {background-color: yellow; color: red;}')
self.ui.pushButton_viewMScan1D.setEnabled(True)
# Get list of data to plot
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
# Convert the workspaces to 2D vector
vecylist = []
yticklabels = []
xmin = None
xmax = None
for scanno in scanlist:
# put y values to list for constructing 2D array
vecx, vecy = self._myControl.getVectorToPlot(expno, scanno)
vecylist.append(vecy)
yticklabels.append('Exp %d Scan %d' % (expno, scanno))
# set up range of x
if xmin is None:
xmin = vecx[0]
xmax = vecx[-1]
dim2array = numpy.array(vecylist)
# Plot
holdprev = False
self.ui.graphicsView_mergeRun.clearAllLines()
self.ui.graphicsView_mergeRun.addPlot2D(dim2array, xmin=xmin, xmax=xmax, ymin=0,
ymax=len(vecylist), holdprev=holdprev, yticklabels=yticklabels)
def doMergeScanViewMerged(self):
""" Change the merged run's view to 1D plot
"""
# Highlight the button's color
self.ui.pushButton_view2D.setStyleSheet('QPushButton {color: red;}')
self.ui.pushButton_view2D.setEnabled(True)
self.ui.pushButton_viewMScan1D.setStyleSheet('QPushButton {color: red;}')
self.ui.pushButton_viewMScan1D.setEnabled(True)
# Clear image
self.ui.graphicsView_mergeRun.clearCanvas()
# Plot
self._plotMergedReducedData(mkey=self._lastMergeIndex, label=self._lastMergeLabel)
def doPlotIndvDetMain(self):
""" Plot individual detector
"""
# Get exp and scan numbers and check whether the data has been loaded
try:
expno = self._getInteger(self.ui.lineEdit_expNo)
scanno = self._getInteger(self.ui.lineEdit_scanNo)
except EmptyError as e:
self._logError(str(e))
return
# Get detector ID and x-label option
try:
status, detidlist = self._getIntArray(self.ui.lineEdit_detID.text())
if status is False:
errmsg = detidlist
print("Unable to parse detector IDs due to %s." % (errmsg))
return
else:
print("[DB] Detectors to plot: %s" % (detidlist))
except EmptyError:
self._logError("Detector ID must be specified for plotting individual detector.")
return
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
xlabel = str(self.ui.comboBox_indvDetXLabel.currentText()).strip()
if xlabel != "" and xlabel != "Pt." and xlabel != "2theta/Scattering Angle":
# Plot with sample logs other than Pt.
self._logNotice("New Feature: X-label %s is supported for plotting individual detector's counts. "
" Set to detector angle." % xlabel)
xlabel = xlabel
else:
# Plot with Pt. or detector angles
if xlabel != "Pt.":
xlabel = ""
self._logNotice("X-label for individual detectror is '%s'." % (xlabel))
# plot
for detid in sorted(detidlist):
try:
self._plot_individual_detector_counts(expno, scanno, detid, xlabel, resetboundary=not overplot)
self._expNo = expno
self._scanNo = scanno
self._detID = detid
self._indvXLabel = xlabel
except NotImplementedError as e:
self._logError(str(e))
def doPlotIndvDetNext(self):
""" Plot next raw detector signals for tab 'Individual Detector'
"""
# Plot
try:
currdetid = self._detID + 1
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid,
self._indvXLabel)
except KeyError as e:
self._logError(str(e))
else:
self._detID = currdetid
# Update widget
self.ui.lineEdit_detID.setText(str(self._detID))
def doPlotIndvDetPrev(self):
""" Plot previous individual detector's signal for tab 'Individual Detector'
"""
# Plot
try:
currdetid = self._detID - 1
# Over plot previous or clear
overplot = self.ui.checkBox_overPlotIndvDet.isChecked()
if overplot is False:
self.doClearIndDetCanvas()
self._plot_individual_detector_counts(self._expNo, self._scanNo, currdetid,
self._indvXLabel)
except KeyError as e:
self._logError(str(e))
else:
self._detID = currdetid
# Update widget
self.ui.lineEdit_detID.setText(str(self._detID))
def do_convert_plot_multi_scans(self):
""" Convert individual plots from normalized to raw or vice verse
"""
# Identify the mode
if str(self.ui.pushButton_plotRawMultiScans.text()) == 'Plot Raw':
new_mode = 'Plot Raw'
else:
new_mode = 'Plot Normalized'
# Get information
try:
min_x = self._getFloat(self.ui.lineEdit_mergeMinX)
except EmptyError:
min_x = None
try:
max_x = self._getFloat(self.ui.lineEdit_mergeMaxX)
except EmptyError:
max_x = None
bin_size = self._getFloat(self.ui.lineEdit_mergeBinSize)
# Process input experiment number and scan list
try:
r = self._uiGetExpScanTabMultiScans()
exp_no = r[0]
scan_list = r[1]
except NotImplementedError as e:
self._logError(str(e))
return False
# Re-process the data
if new_mode == 'Plot Raw':
if self._multiScanList is None or self._multiScanExp is None:
raise NotImplementedError('Experiment and scan list are not set up for plot raw.')
self._myControl.scale_to_raw_monitor_counts(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size)
else:
self._myControl.reset_to_normalized(self._multiScanExp, self._multiScanList, min_x, max_x, bin_size)
# Clear image
canvas = self.ui.graphicsView_mergeRun
canvas.clearAllLines()
canvas.clearCanvas()
canvas.resetLineColorStyle()
# Plot data
unit = str(self.ui.comboBox_mscanUnit.currentText())
xlabel = self._getXLabelFromUnit(unit)
for scan_no in scan_list:
label = "Exp %s Scan %s" % (str(exp_no), str(scan_no))
self._plotReducedData(exp_no, scan_no, canvas, xlabel, label=label, clearcanvas=False)
# Change the button name
if new_mode == 'Plot Raw':
self.ui.pushButton_plotRawMultiScans.setText('Plot Normalized')
else:
self.ui.pushButton_plotRawMultiScans.setText('Plot Raw')
def doPlotRawPtMain(self):
""" Plot current raw detector signal for a specific Pt.
"""
# Get experiment number and scan number for data file
try:
expno = self._getInteger(self.ui.lineEdit_expNo)
scanno = self._getInteger(self.ui.lineEdit_scanNo)
except EmptyError as e:
self._logError(str(e))
return
# plot options
doOverPlot = bool(self.ui.checkBox_overpltRawDet.isChecked())
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
try:
ptNo = self._getInteger(self.ui.lineEdit_ptNo)
except EmptyError:
ptNo = None
# plot
print("[DB] Plot Raw Detector: PlotMode = %s." % (plotmode))
execstatus = self._plotRawDetSignal(expno, scanno, plotmode, ptNo, doOverPlot)
# set global values if good
if execstatus is True:
self._rawDetPtNo = ptNo
self._rawDetExpNo = expno
self._rawDetScanNo = scanno
self._rawDetPlotMode = plotmode
else:
print("[Error] Execution fails with signal %s. " % (str(execstatus)))
def doPlotRawPtNext(self):
""" Plot next raw detector signals
"""
# Check
if self._rawDetPtNo is not None:
ptno = self._rawDetPtNo + 1
else:
self._logError("Unable to plot previous raw detector \
because Pt. or Detector ID has not been set up yet.")
return
# Get plot mode and plot
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
overplot = bool(self.ui.checkBox_overpltRawDet.isChecked())
execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode,
ptno, overplot)
# update if it is good to plot
if execstatus:
self._rawDetPtNo = ptno
self.ui.lineEdit_ptNo.setText(str(ptno))
def do_enable_excluded_dets(self):
""" Enable or disable the line editor for excluded detectors
:return:
"""
if self.ui.checkBox_useDetExcludeFile.isChecked():
self.ui.lineEdit_detExcluded.setEnabled(True)
else:
self.ui.lineEdit_detExcluded.setDisabled(True)
def do_plot_raw_pt_prev(self):
""" Plot previous raw detector
"""
# Validate input
if self._rawDetPtNo is not None:
ptno = self._rawDetPtNo - 1
else:
self._logError("Unable to plot previous raw detector \
because Pt. or Detector ID has not been set up yet.")
return
# get plot mode and do plt
plotmode = str(self.ui.comboBox_rawDetMode.currentText())
overplot = bool(self.ui.checkBox_overpltRawDet.isChecked())
execstatus = self._plotRawDetSignal(self._rawDetExpNo, self._rawDetScanNo, plotmode,
ptno, overplot)
# update if it is good to plot
if execstatus:
self._rawDetPtNo = ptno
self.ui.lineEdit_ptNo.setText(str(ptno))
def do_plot_sample_log(self):
""" Plot sample log vs. Pt. in tab 'Individual Detector'
"""
expNo = int(self.ui.lineEdit_expNo.text())
scanno = int(self.ui.lineEdit_scanNo.text())
logname = str(self.ui.comboBox_indvDetYLabel.currentText())
self._plotSampleLog(expNo, scanno, logname)
def doReduce2Theta(self):
""" Rebin the data and plot in 2theta for tab 'Normalized'
"""
unit = '2theta'
self._uiReducePlotNoramlized(unit)
def doReduceDSpacing(self):
""" Rebin the data and plot in d-spacing for tab 'Normalized'
"""
# new unit and information
unit = "dSpacing"
self._uiReducePlotNoramlized(unit)
def doReduceQ(self):
""" Rebin the data and plot in momentum transfer Q for tab 'Normalized'
"""
unit = 'Momentum Transfer (Q)'
self._uiReducePlotNoramlized(unit)
def doReduceSetData(self):
""" Reduce multiple data
"""
# Get exp number and list of scans
try:
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanlist = r[1]
except NotImplementedError as e:
self._logError(str(e))
return False
else:
self._multiScanExp = expno
self._multiScanList = scanlist
# Reduce and plot data
unit = str(self.ui.comboBox_mscanUnit.currentText())
xlabel = self._getXLabelFromUnit(unit)
canvas = self.ui.graphicsView_mergeRun
# canvas.clearAllLines() NO NEED
canvas.clearCanvas()
canvas.resetLineColorStyle()
for scan in scanlist:
r = self._uiReduceData(3, unit, expno, scan)
good = r[0]
expno = r[1]
scanno = r[2]
if good is True:
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=False)
else:
self._logError('Failed to reduce Exp %s Scan %s' % (str(expno), str(scanno)))
def doReduceVanadium2Theta(self):
""" Rebin MDEventWorkspaces in 2-theta. for pushButton_rebinD
in vanadium peak strip tab
Suggested workflow
1. Rebin data
2. Calculate vanadium peaks in 2theta
3.
"""
# Reduce data
unit = '2theta'
itab = 4
r = self._uiReduceData(itab, unit)
good = r[0]
expno = r[1]
scanno = r[2]
# Plot reduced data and vanadium peaks
if good is True:
canvas = self.ui.graphicsView_vanPeaks
xlabel = self._getXLabelFromUnit(unit)
label = "Exp %s Scan %s" % (str(expno), str(scanno))
self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=True)
# plot vanadium peaks
vanpeakpos = self._myControl.getVanadiumPeaksPos(expno, scanno)
self.ui.lineEdit_stripVPeaks.setText(str(vanpeakpos))
self._plotPeakIndicators(self.ui.graphicsView_vanPeaks, vanpeakpos)
return good
def doSaveData(self):
""" Save data
"""
# get exp number and scan number
try:
# exp and scan
expno, scanno = self._uiGetExpScanNumber()
# file type
filetype = str(self.ui.comboBox_outputFormat.currentText())
# file name
savedatadir = str(self.ui.lineEdit_outputFileName.text()).strip()
if savedatadir is not None and os.path.exists(savedatadir) is True:
homedir = savedatadir
else:
homedir = os.getcwd()
# launch a dialog to get data
filefilter = "All files (*);;Fullprof (*.dat);;GSAS (*.gsa)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File', homedir, filefilter))
except NotImplementedError as e:
self._logError(str(e))
else:
self._myControl.savePDFile(expno, scanno, filetype, sfilename)
def doSaveMergedScan(self):
""" Save merged scan
"""
homedir = os.getcwd()
filefilter = "Fullprof (*.dat)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter))
self._myControl.saveMergedScan(sfilename, mergeindex=self._lastMergeIndex)
def doSaveMultipleScans(self):
""" Save multiple scans
"""
# Get experiment number and scans
r = self._uiGetExpScanTabMultiScans()
expno = r[0]
scanslist = r[1]
# Get base file name
homedir = os.getcwd()
savedir = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory To Save Fullprof', homedir))
for scanno in scanslist:
sfilename = os.path.join(savedir, "HB2A_Exp%d_Scan%d_FP.dat" % (expno, scanno))
self._myControl.savePDFile(expno, scanno, 'fullprof', sfilename)
def doSaveVanRun(self):
""" Save the vanadium run with peaks removed
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
homedir = os.getcwd()
filefilter = "Fullprof (*.dat)"
sfilename = str(QtGui.QFileDialog.getSaveFileName(self, 'Save File In Fullprof', homedir, filefilter))
self._myControl.saveProcessedVanadium(expno, scanno, sfilename)
def doSmoothVanadiumData(self):
""" Smooth vanadium spectrum
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
smoothparams_str = str(self.ui.lineEdit_smoothParams.text())
# Smooth data
status = self._myControl.smoothVanadiumSpectrum(expno, scanno, smoothparams_str)
if not status:
self._logError("Failed to smooth vanadium data")
# Plot
unit = '2theta'
xlabel = self._getXLabelFromUnit(unit)
label = "Vanadium Exp %d Scan %d FFT-Smooth by %s" % (expno, scanno, smoothparams_str)
self._plotVanadiumRun(expno, scanno, xlabel, label, False, True)
def doSmoothVanadiumApply(self):
""" Apply smoothing effect to vanadium data
"""
# Get experiment number and scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
self._myControl.applySmoothVanadium(expno, scanno, True)
def doSmoothVanadiumUndo(self):
""" Undo smoothing vanadium
"""
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Unable to get exp number and scan number for smoothing vanadium data due to %s." % (
str(e)))
return False
self._myControl.applySmoothVanadium(expno, scanno, False)
def doStripVandiumPeaks(self):
""" Strip vanadium peaks
"""
# Get exp number an scan number
try:
expno, scanno = self._uiGetExpScanNumber()
except NotImplementedError as e:
self._logError("Error to get Exp and Scan due to %s." % (str(e)))
return False
# Default unit
unit = '2theta'
# Get and build binning parameter
xmin, binsize, xmax = self._uiGetBinningParams(itab=4)
if xmin is None:
binparams = '%f' % (binsize)
else:
binparams = '%f,%f,%f' % (xmin, binsize, xmax)
# Strip vanadium peak
good = self._myControl.stripVanadiumPeaks(expno, scanno, binparams, vanpeakposlist=None)
# Plot
if good:
xlabel = self._getXLabelFromUnit(unit)
label = "Exp %d Scan %d Bin = %.5f Vanadium Stripped" % (expno, scanno, binsize)
self._plotVanadiumRun(expno, scanno, xlabel, label, False)
def doUpdateWavelength(self):
""" Update the wavelength to line edit
"""
index = self.ui.comboBox_wavelength.currentIndex()
print("Update wavelength to ", index)
if index == 0:
wavelength = 2.41
elif index == 1:
wavelength = 1.54
elif index == 2:
wavelength = 1.12
else:
wavelength = None
self.ui.lineEdit_wavelength.setText(str(wavelength))
def on_mouseDownEvent(self, event):
""" Respond to pick up a value with mouse down event
Definition of button_press_event is:
button_press_event(x, y, button, dblclick=False, guiEvent=None)
Thus event has x, y and button.
event.button has 3 values:
1: left
2: middle
3: right
"""
# FUTURE: Need to make this work
x = event.xdata
y = event.ydata
button = event.button
if x is not None and y is not None:
# mouse is clicked within graph
if button == 1:
msg = "Mouse 1: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button)
print(msg)
elif button == 2:
msg = "Mouse 2: You've clicked on a bar with coords:\n %f, %f\n and button %d" % (x, y, button)
QtGui.QMessageBox.information(self, "Click!", msg)
elif button == 3:
# right click of mouse will pop up a context-menu
# menu should be self.ui.menu?
menu = QtGui.QMenu(self)
addAction = QtGui.QAction('Add', self)
addAction.triggered.connect(self.addSomething)
menu.addAction(addAction)
rmAction = QtGui.QAction('Remove', self)
rmAction.triggered.connect(self.rmSomething)
menu.addAction(rmAction)
# add other required actions
menu.popup(QtGui.QCursor.pos())
def on_mouseMotion(self, event):
""" Event handler for mouse being detected to move
"""
# prev_x = self._viewMerge_X
# prev_y = self._viewMerge_Y
curx = event.xdata
cury = event.ydata
if curx is None or cury is None:
return
self._viewMerge_X = event.xdata
self._viewMerge_Y = event.ydata
def addSomething(self):
"""
"""
# FUTURE - Need to implement how to deal with this
print("Add scan back to merge")
def rmSomething(self):
"""
"""
# FUTURE - Need to implement how to deal with this
print("Remove a scan from merged data.")
# --------------------------------------------------------------------------
# Private methods to plot data
# --------------------------------------------------------------------------
def _plotIndividualDetCountsVsSampleLog(self, expno, scanno, detid, samplename, raw=True):
""" Plot one specific detector's counts vs. one specified sample log's value
along with all Pts.
For example: detector 11's counts vs. sample_b's value
:param expno:
:param scanno:
:param detid:
:param samplename:
:param raw: boolean whether the output is normalized by monitor counts
:return:
"""
# Validate input
try:
expno = int(expno)
scanno = int(scanno)
detid = int(detid)
samplename = str(samplename)
except ValueError:
raise NotImplementedError("ExpNo, ScanNo or DetID is not integer.")
# Get the array for detector counts vs. sample log value by mapping Pt.
vecx, vecy = self._myControl.getIndividualDetCountsVsSample(expno, scanno,
detid, samplename, raw)
# Clear canvas
self.ui.graphicsView_indvDet.clearCanvas()
# Plot
marker, color = self.ui.graphicsView_indvDet.getNextLineMarkerColorCombo()
self.ui.graphicsView_indvDet.add_plot1d(vec_x=vecx,
vec_y=vecy,
marker=marker,
color=color,
x_label=samplename,
y_label='Counts',
label='DetID = %d' % (detid))
# FUTURE: In future, need to find out how to use self._graphIndDevMode
def _plot_individual_detector_counts(self, expno, scanno, detid, xaxis, resetboundary=False):
""" Plot a specific detector's counts along all experiment points (pt)
:param expno:
:param scanno:
:param detid:
:param xaxis:
:param resetboundary:
:return:
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
detid = int(detid)
plot_error_bar = self.ui.checkBox_indDetErrorBar.isChecked()
plot_normal = self.ui.checkBox_indDetNormByMon.isChecked()
# Reject if data is not loaded
if self._myControl.hasDataLoaded(expno, scanno) is False:
self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno))
return False
# Canvas and line information
canvas = self.ui.graphicsView_indvDet
if canvas not in self._tabLineDict:
self._tabLineDict[canvas] = []
# get data
self._logNotice("Input x-axis is '%s' for plotting individual detector's counts." % (xaxis))
if len(xaxis) == 0:
xaxis = None
vecx, vecy = self._myControl.getIndividualDetCounts(expno, scanno, detid, xaxis, plot_normal)
if not isinstance(vecx, numpy.ndarray):
raise NotImplementedError('vecx, vecy must be numpy arrays.')
if plot_error_bar:
y_err = numpy.sqrt(vecy)
else:
y_err = None
# Plot to canvas
marker, color = canvas.getNextLineMarkerColorCombo()
if xaxis == "" or xaxis == "2theta/Scattering Angle":
xlabel = r'$2\theta$'
else:
xlabel = xaxis
# FUTURE - If it works with any way of plotting, then refactor Pt. with any other sample names
label = "Detector ID: %d" % (detid)
if self._tabLineDict[canvas].count((expno, scanno, detid)) == 0:
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel,
y_label='Counts', label=label, y_err=y_err)
self._tabLineDict[canvas].append((expno, scanno, detid))
if resetboundary:
# Set xmin and xmax about the data for first time
xmin = min(vecx)
xmax = max(vecx)
ymin = min(vecy)
ymax = max(vecy)
else:
# auto setup for image boundary
xmin = min(min(vecx), canvas.getXLimit()[0])
xmax = max(max(vecx), canvas.getXLimit()[1])
ymin = min(min(vecy), canvas.getYLimit()[0])
ymax = max(max(vecy), canvas.getYLimit()[1])
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
# Set canvas mode
# FUTURE: Consider how to use self._graphIndDevMode in future
# self._graphIndDevMode = (xlabel, 'Counts')
return True
def _plotPeakIndicators(self, canvas, peakposlist):
""" Plot indicators for peaks
"""
print("[DB] Peak indicators are at ", peakposlist)
rangey = canvas.getYLimit()
rangex = canvas.getXLimit()
for pos in peakposlist:
if pos >= rangex[0] and pos <= rangex[1]:
vecx = numpy.array([pos, pos])
vecy = numpy.array([rangey[0], rangey[1]])
canvas.add_plot1d(vecx, vecy, color='black', line_style='--')
def _plotRawDetSignal(self, expno, scanno, plotmode, ptno, dooverplot):
""" Plot the counts of all detectors of a certain Pt. in an experiment
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
# Set up canvas and dictionary
canvas = self.ui.graphicsView_Raw
if canvas not in self._tabLineDict:
self._tabLineDict[canvas] = []
# Check whether data exists
if not self._myControl.hasDataLoaded(expno, scanno):
self._logError("File has not been loaded for Exp %d Scan %d. Load data first!" % (expno, scanno))
return
# Get vecx and vecy
if plotmode == "All Pts.":
# Plot all Pts.
vecxylist = self._myControl.getRawDetectorCounts(expno, scanno)
# Clear previous
self.ui.graphicsView_Raw.clearAllLines()
self.ui.graphicsView_Raw.setLineMarkerColorIndex(0)
self._tabLineDict[canvas] = []
elif plotmode == "Single Pts.":
# Plot plot
ptno = int(ptno)
if not dooverplot:
self.ui.graphicsView_Raw.clearAllLines()
self.ui.graphicsView_Raw.setLineMarkerColorIndex(0)
self._tabLineDict[canvas] = []
# Plot one pts.
vecxylist = self._myControl.getRawDetectorCounts(expno, scanno, [ptno])
else:
# Raise exception
raise NotImplementedError("Plot mode %s is not supported." % (plotmode))
# Set up unit/x-label
unit = r"$2\theta$"
# plot
xmin = None
xmax = None
ymin = None
ymax = None
for ptno, vecx, vecy in vecxylist:
# FUTURE: Label is left blank as there can be too many labels
label = 'Pt %d' % (ptno)
# skip if this plot has existed
if self._tabLineDict[canvas].count((expno, scanno, ptno)) == 1:
continue
marker, color = canvas.getNextLineMarkerColorCombo()
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=unit,
y_label='intensity', label=label)
# set up line tuple
self._tabLineDict[canvas].append((expno, scanno, ptno))
# auto setup for image boundary
xmin = min(min(vecx), canvas.getXLimit()[0])
xmax = max(max(vecx), canvas.getXLimit()[1])
ymin = min(min(vecy), canvas.getYLimit()[0])
ymax = max(max(vecy), canvas.getYLimit()[1])
# Reset canvas x-y limit
if xmin is not None:
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
return True
def _plotMergedReducedData(self, mkey, label):
""" Plot the reduced data from merged ...
"""
# get the data
try:
vecx, vecy = self._myControl.getMergedVector(mkey)
except KeyError as e:
self._logError("Unable to retrieve merged reduced data due to %s." % (str(e)))
return
canvas = self.ui.graphicsView_mergeRun
# Clear canvas
canvas.clearAllLines()
canvas.clearCanvas()
# Plot
marker, color = canvas.getNextLineMarkerColorCombo()
xlabel = self._getXLabelFromUnit(self.ui.comboBox_mscanUnit.currentText())
canvas.add_plot1d(vecx, vecy, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label)
xmax = max(vecx)
xmin = min(vecx)
dx = xmax - xmin
ymax = max(vecy)
ymin = min(vecy)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _plotReducedData(self, exp, scan, canvas, xlabel, label=None, clearcanvas=True,
spectrum=0, plot_error=False):
""" Plot reduced data for exp and scan
"""
if spectrum != 0:
raise NotImplementedError("Unable to support spectrum = %d case." % (spectrum))
# whether the data is load
if not self._myControl.hasReducedWS(exp, scan):
self._logWarning("No data to plot!")
return
# get to know whether it is required to clear the image
if clearcanvas:
canvas.clearAllLines()
canvas.setLineMarkerColorIndex(0)
# plot
vec_x, vec_y = self._myControl.getVectorToPlot(exp, scan)
if not isinstance(vec_x, numpy.ndarray):
vec_x = numpy.array(vec_x)
vec_y = numpy.array(vec_y)
# FUTURE - Should check y_err set up correctly in Mantid or not
if plot_error:
raise RuntimeError('Implement how to return y_err ASAP.')
else:
y_err = None
# get the marker color for the line
marker, color = canvas.getNextLineMarkerColorCombo()
# plot
if label is None:
label = "Exp %d Scan %d" % (exp, scan)
canvas.add_plot1d(vec_x, vec_y, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label,
y_err=y_err)
if clearcanvas:
xmax = max(vec_x)
xmin = min(vec_x)
dx = xmax - xmin
ymax = max(vec_y)
ymin = min(vec_y)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _plotSampleLog(self, expno, scanno, samplelogname):
""" Plot the value of a sample log among all Pt.
"""
# Validate input
expno = int(expno)
scanno = int(scanno)
samplelogname = str(samplelogname)
# Reject if data is not loaded
if not self._myControl.hasDataLoaded(expno, scanno):
self._logError("Data file for Exp %d Scan %d has not been loaded." % (expno, scanno))
return False
# Canvas and line information
self._indvDetCanvasMode = 'samplelog'
# pop out the xlabel list
# REFACTOR - Only need to set up once if previous plot has the same setup
if self.ui.comboBox_indvDetXLabel.count() == 0:
floatsamplelognamelist = self._myControl.getSampleLogNames(expno, scanno)
self.ui.comboBox_indvDetXLabel.clear()
self.ui.comboBox_indvDetXLabel.addItems(floatsamplelognamelist)
raise RuntimeError("This X-label combo box should be set up during loading data before.")
xlabel = str(self.ui.comboBox_indvDetXLabel.currentText())
# get data
vecx, vecy = self._myControl.getSampleLogValue(expno, scanno, samplelogname, xlabel)
# Plot to canvas
canvas = self.ui.graphicsView_indvDet
# FUTURE - Clear canvas (think of a case that no need to clear canvas)
canvas.clearCanvas()
# canvas.clearAllLines()
marker, color = canvas.getNextLineMarkerColorCombo()
if xlabel is None:
xlabel = r'Pt'
label = samplelogname
canvas.add_plot1d(vecx, vecy, marker=marker, color=color, x_label=xlabel,
y_label='Counts', label=label)
# auto setup for image boundary
xmin = min(vecx)
xmax = max(vecx)
ymin = min(vecy)
ymax = max(vecy)
dx = xmax - xmin
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.0001, xmax + dx * 0.0001, ymin - dy * 0.0001, ymax + dy * 0.0001)
return True
def _plotVanadiumRun(self, exp, scan, xlabel, label, clearcanvas=False, TempData=False):
""" Plot processed vanadium data
Arguments:
- TempData :: flag whether the vanadium run is a temporary data set
"""
# Check whether the data is load
exp = int(exp)
scan = int(scan)
if not self._myControl.hasReducedWS(exp, scan):
self._logWarning("No data to plot!")
return
# Get data to plot
try:
vecx, vecy = self._myControl.getVectorProcessVanToPlot(exp, scan, TempData)
if not TempData:
vecx, vecyOrig = self._myControl.getVectorToPlot(exp, scan)
diffY = vecyOrig - vecy
except NotImplementedError as e:
errmsg = '[Error] Unable to retrieve processed vanadium spectrum for exp %d scan %d. ' \
'Reason: %s' % (exp, scan, str(e))
QtGui.QMessageBox.information(self, "Return!", errmsg)
return
# Get to know whether it is required to clear the image
canvas = self.ui.graphicsView_vanPeaks
if TempData:
clearcanvas = False
if clearcanvas:
canvas.clearAllLines()
canvas.setLineMarkerColorIndex(0)
# get the marker color for the line
if TempData:
marker = None
color = 'blue'
else:
marker, color = canvas.getNextLineMarkerColorCombo()
# plot
canvas.add_plot1d(vecx, vecy, marker=marker, color=color,
x_label=xlabel, y_label='intensity', label=label)
if not TempData:
canvas.add_plot1d(vecx, diffY, marker='+', color='green',
x_label=xlabel, y_label='intensity', label='Diff')
# reset canvas limits
if clearcanvas:
xmax = max(vecx)
xmin = min(vecx)
dx = xmax - xmin
ymax = max(vecy)
ymin = min(diffY)
dy = ymax - ymin
canvas.setXYLimit(xmin - dx * 0.1, xmax + dx * 0.1, ymin - dy * 0.1, ymax + dy * 0.1)
def _uiDownloadDataFile(self, exp, scan):
""" Download data file according to its exp and scan
Either download the data from a server or copy the data file from local
disk
"""
# Get on hold of raw data file
useserver = self.ui.radioButton_useServer.isChecked()
uselocal = self.ui.radioButton_useLocal.isChecked()
if useserver == uselocal:
self._logError("It is logically wrong to set up server/local dir for data.")
self.ui.radioButton_useServer.setChecked(True)
self.ui.radioButton_useLocal.setChecked(False)
rvalue = False
if self._srcFromServer:
# Use server: build the URl to download data
if not self._serverAddress.endswith('/'):
self._serverAddress += '/'
fullurl = "%s%s/exp%d/Datafiles/%s_exp%04d_scan%04d.dat" % (self._serverAddress,
self._instrument.lower(), exp,
self._instrument.upper(), exp, scan)
print("URL: ", fullurl)
cachedir = str(self.ui.lineEdit_cache.text()).strip()
if not os.path.exists(cachedir):
invalidcache = cachedir
cachedir = os.getcwd()
self.ui.lineEdit_cache.setText(cachedir)
self._logWarning("Cache directory %s is not valid. "
"Using current workspace directory %s as cache." % (invalidcache, cachedir))
filename = '%s_exp%04d_scan%04d.dat' % (self._instrument.upper(), exp, scan)
srcFileName = os.path.join(cachedir, filename)
status, errmsg = HfirPDReductionControl.downloadFile(fullurl, srcFileName)
if not status:
self._logError(errmsg)
srcFileName = None
else:
rvalue = True
elif self._srcAtLocal:
# Data from local
srcFileName = os.path.join(self._localSrcDataDir, "%s/Exp%d_Scan%04d.dat" % (self._instrument, exp, scan))
if os.path.exists(srcFileName):
rvalue = True
else:
raise NotImplementedError("Logic error. Neither downloaded from server.\
Nor from local drive")
return (rvalue, srcFileName)
def _uiGetBinningParams(self, itab):
""" Get binning parameters
Return:
- xmin, binsize, xmax
"""
# Get value
if itab == 2:
xmin = str(self.ui.lineEdit_xmin.text())
xmax = str(self.ui.lineEdit_xmax.text())
binsize = str(self.ui.lineEdit_binsize.text())
elif itab == 3:
xmin = str(self.ui.lineEdit_mergeMinX.text())
xmax = str(self.ui.lineEdit_mergeMaxX.text())
binsize = str(self.ui.lineEdit_mergeBinSize.text())
elif itab == 4:
xmin = str(self.ui.lineEdit_min2Theta.text())
xmax = str(self.ui.lineEdit_max2Theta.text())
binsize = str(self.ui.lineEdit_binsize2Theta.text())
else:
raise NotImplementedError("Binning parameters are not used for %d-th tab." % (itab))
# Parse values
try:
xmin = float(xmin)
xmax = float(xmax)
except ValueError:
xmin = None
xmax = None
else:
if xmin >= xmax:
raise NotImplementedError("set minimum X = %.5f is larger than \
maximum X = %.5f" % (xmin, xmax))
try:
binsize = float(binsize)
except ValueError:
raise NotImplementedError("Error: bins size '%s' is not a float number." % (binsize))
# Fix for merging as xmin and xmax must be same for all scans
if itab == 3 and xmin is None:
xmin = 5.
xmax = 150.
return (xmin, binsize, xmax)
def _uiGetExcludedDetectors(self):
""" Get excluded detectors from input line edit
Return :: list of detector IDs to exclude from reduction
"""
excludedetidlist = []
if self.ui.checkBox_useDetExcludeFile.isChecked():
detids_str = str(self.ui.lineEdit_detExcluded.text()).strip()
status, excludedetidlist = self._getIntArray(detids_str)
if status is False:
self._logError("Extra scans are not a list of integers: %s." % (
str(self.ui.lineEdit_extraScans.text())))
excludedetidlist = []
return excludedetidlist
def _uiGetExpScanNumber(self):
""" Get experiment number and scan number from widgets for merged
"""
expnostr = self.ui.lineEdit_expNo.text()
scannostr = self.ui.lineEdit_scanNo.text()
try:
expno = int(expnostr)
scanno = int(scannostr)
except ValueError:
raise NotImplementedError("Either Exp No '%s' or Scan No '%s \
is not set up right as integer." % (expnostr, scannostr))
return (expno, scanno)
def _uiGetExpScanTabMultiScans(self):
""" Get exp number and scans from tab 3
"""
try:
expno = int(self.ui.lineEdit_expNo.text())
startscan = int(self.ui.lineEdit_scanStart.text())
endscan = int(self.ui.lineEdit_scanEnd.text())
except ValueError as e:
raise RuntimeError("For merging scans, Exp No, Starting scan number and \
end scan number must be given: %s" % (str(e)))
# scans = [startscan, endscan] + [others] - [excluded]
status, extrascanlist = self._getIntArray(str(self.ui.lineEdit_extraScans.text()))
if not status:
raise RuntimeError(extrascanlist)
status, excludedlist = self._getIntArray(str(self.ui.lineEdit_exclScans.text()))
self._logDebug("Excluded list: %s" % (str(excludedlist)))
if not status:
self._logError(excludedlist)
return
scanslist = list(range(startscan, endscan + 1))
scanslist.extend(extrascanlist)
scanslist = list(set(scanslist))
for scan in excludedlist:
scanslist.remove(scan)
return (expno, sorted(scanslist))
def _uiIsBinParamsChange(self, itab, binparams):
""" Check whether current bin parameters are same
as given value
"""
xmin, binsize, xmax = self._uiGetBinningParams(itab)
newbinparams = [xmin, binsize, xmax]
# check binning
same = True
for i in range(3):
par_0 = binparams[i]
par_1 = newbinparams[i]
try:
if abs(float(par_0) - float(par_1)) > 1.0E-6:
same = False
except TypeError:
if par_0 is not None or par_1 is not None:
same = False
if not same:
break
change = not same
if change:
print("[D...............B]", end=' ')
print("%s vs %s " % (str(xmin), str(self._tabBinParamDict[itab][0])), end=' ')
print("%s vs %s " % (str(xmax), str(self._tabBinParamDict[itab][2])), end=' ')
print("%s vs %s " % (str(binsize), str(self._tabBinParamDict[itab][1])))
else:
print("[DB] Rebin = False")
return change
def _uiReduceData(self, itab, unit, expno=None, scanno=None):
    """ Rebin (reduce) one scan by reading GUI widgets' values.

    Arguments:
    - itab   : index of the tab; only 2, 3 and 4 are allowed
    - unit   : string for target unit
    - expno  : optional experiment number (used by the tab-3 multi-scan path)
    - scanno : optional scan number (used by the tab-3 multi-scan path)

    Return :: 3-tuple (success flag, expno, scanno); returns None when the
    exp/scan widgets cannot be parsed (the error is shown to the user first).
    """
    # Experiment number and Scan number
    if isinstance(expno, int) and isinstance(scanno, int):
        # Call from tab-3 multiple scan
        pass
    else:
        try:
            expno, scanno = self._uiGetExpScanNumber()
        except NotImplementedError as e:
            self._logError(str(e))
            # NOTE(review): returns None here but a 3-tuple elsewhere;
            # callers must cope with both shapes.
            return
    # Get binning parameter
    xmin, binsize, xmax = self._uiGetBinningParams(itab)
    # Get wavelength: tab 3 takes it from the scan data, other tabs from the widget
    try:
        if itab == 3:
            wavelength = float(self._myControl.getWavelength(expno, scanno))
        else:
            wavelength = float(str(self.ui.lineEdit_wavelength.text()))
    except TypeError:
        # wavelength unavailable -- only tolerable when staying in 2theta
        if unit != '2theta':
            raise NotImplementedError('Wavelength must be specified for unit %s.' % (unit))
    # Get scale factor; an empty field means "no normalization"
    try:
        scalefactor = self._getFloat(self.ui.lineEdit_normalizeMonitor)
    except EmptyError:
        scalefactor = None
    except ValueError as valueerror:
        raise ValueError("Unable to get normalization factor due to %s." % (str(valueerror)))
    # Rebin
    try:
        # rebinned = self._myControl.rebin(expno, scanno, unit, wavelength, xmin, binsize, xmax)
        excludeddetlist = self._uiGetExcludedDetectors()
        self._myControl.reduceSpicePDData(expno, scanno,
                                          unit, xmin, xmax, binsize, wavelength, excludeddetlist, scalefactor)
        # Record binning so later calls can detect parameter changes
        self._tabBinParamDict[itab] = [xmin, binsize, xmax]
    except NotImplementedError as e:
        self._logError(str(e))
        return (False, expno, scanno)
    return (True, expno, scanno)
def _uiReducePlotNoramlized(self, unit):
    """ Support Reduce2Theta, ReduceDspacing and ReduceQ.

    Reduces the current exp/scan to *unit* (if not already plotted with
    identical binning) and plots it on the tab-2 canvas.
    NOTE(review): the misspelled name ("Noramlized") is kept because
    callers elsewhere reference it.
    """
    itab = 2
    canvas = self.ui.graphicsView_reducedData
    expno, scanno = self._uiGetExpScanNumber()
    change = self._uiIsBinParamsChange(itab, self._tabBinParamDict[itab])
    # check whether line record
    if unit == self._currUnit and self._tabLineDict[itab].count((expno, scanno)) > 0 and not change:
        # there is no need to plot again as line exists
        return
    # reduce
    r = self._uiReduceData(2, unit)
    good = r[0]
    expno = r[1]
    scanno = r[2]
    # failed to reduce
    if not good:
        self._logError("Failed to reduce Exp %d Scan %d" % (expno, scanno))
        return
    # Decide whether to clear the canvas: a unit change always clears;
    # otherwise the (confusingly named) check box controls it
    if unit != self._currUnit:
        clearcanvas = True
    elif not self.ui.checkBox_clearPrevious.isChecked():
        # NOTE: naming of the widget is VERY confusing. Should be changed to keepPrevious
        clearcanvas = True
    else:
        clearcanvas = False
    # reset record dictionary if unit is different from present
    if clearcanvas:
        self._tabLineDict[itab] = []
        self._currUnit = unit
    self._tabLineDict[itab].append((expno, scanno))
    xlabel = self._getXLabelFromUnit(unit)
    label = "Exp %s Scan %s" % (str(expno), str(scanno))
    self._plotReducedData(expno, scanno, canvas, xlabel, label=label, clearcanvas=clearcanvas)
def _logDebug(self, dbinfo):
    """ Log debug information (plain print to stdout).
    """
    print(dbinfo)
def _logError(self, errinfo):
    """ Log error information via a modal Qt message box.
    """
    QtGui.QMessageBox.information(self, "Click!", errinfo)
def _logNotice(self, loginfo):
    """ Log notice information to stdout with a '[Notice]' prefix.
    """
    msg = '[Notice] %s' % loginfo
    print(msg)
    # QtGui.QMessageBox.information(self, "Click!", msg)
def _logWarning(self, warning_info):
    """ Log warning information via a modal Qt message box.
    """
    msg = "[Warning]: %s" % (warning_info)
    QtGui.QMessageBox.information(self, "OK!", msg)
def _getFloat(self, lineedit):
""" Get integer from line edit
Exception: ValueError if empty or no input
"""
valuestr = str(lineedit.text()).strip()
if len(valuestr) == 0:
raise EmptyError("Input is empty. It cannot be converted to float.")
try:
value = float(valuestr)
except ValueError as e:
raise e
return value
def _getInteger(self, lineedit):
""" Get integer from line edit
"""
valuestr = str(lineedit.text()).strip()
if len(valuestr) == 0:
raise EmptyError("Input is empty. It cannot be converted to integer.")
try:
value = int(valuestr)
except ValueError as e:
raise e
return value
def _getIntArray(self, intliststring):
""" Validate whether the string can be divided into integer strings.
Allowed: a, b, c-d, e, f
Return :: 2-tuple (status, list/error message)
"""
intliststring = str(intliststring)
if intliststring == "":
return (True, [])
# Split by ","
termlevel0s = intliststring.split(",")
intlist = []
# For each term
errmsg = ""
returnstatus = True
for level0term in termlevel0s:
level0term = level0term.strip()
# split upon dash -
numdashes = level0term.count("-")
if numdashes == 0:
# one integer
valuestr = level0term
try:
intvalue = int(valuestr)
if str(intvalue) != valuestr:
returnstatus = False
errmsg = "Contains non-integer string %s." % (valuestr)
except ValueError:
returnstatus = False
errmsg = "String %s is not an integer." % (valuestr)
else:
intlist.append(intvalue)
elif numdashes == 1:
# Integer range
twoterms = level0term.split("-")
templist = []
for i in range(2):
valuestr = twoterms[i]
try:
intvalue = int(valuestr)
if str(intvalue) != valuestr:
returnstatus = False
errmsg = "Contains non-integer string %s." % (valuestr)
except ValueError:
returnstatus = False
errmsg = "String %s is not an integer." % (valuestr)
else:
templist.append(intvalue)
# break loop
if not returnstatus:
break
intlist.extend(range(templist[0], templist[1] + 1))
else:
# Undefined siutation
returnstatus = False
errmsg = "Term %s contains more than 1 dash." % (level0term)
# break loop if something is wrong
if not returnstatus:
break
# Return with false
if not returnstatus:
return (False, errmsg)
return (True, intlist)
def _getXLabelFromUnit(self, unit):
""" Get X-label from unit
"""
if unit == '2theta':
xlabel = r'$2\theta$ (Degrees)'
elif unit == 'dSpacing':
xlabel = r"d $(\AA)$"
elif unit == 'Momentum Transfer (Q)':
xlabel = r"Q $(\AA^{-1})$"
else:
xlabel = 'Wacky Unknown'
return xlabel
|
gpl-3.0
|
windyuuy/opera
|
chromium/src/third_party/python_26/Lib/encodings/cp500.py
|
593
|
13377
|
""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp500 (EBCDIC) codec backed by this module's charmap tables."""
    def encode(self,input,errors='strict'):
        # Unicode -> cp500 bytes via the inverted mapping table
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # cp500 bytes -> Unicode; one decoding_table entry per byte value
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental cp500 encoder; charmap encoding needs no carried state."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental cp500 decoder; charmap decoding needs no carried state."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for cp500; all behaviour inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for cp500; all behaviour inherited from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings package registry."""
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bsd-3-clause
|
whitehats/monitowl-agent
|
whmonit/client/sensors/ping/linux_01.py
|
1
|
1198
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
network ping sensor.
'''
from ping import do_one as ping
from whmonit.client.sensors import TaskSensorBase
from whmonit.common.units import unit_reg
class Sensor(TaskSensorBase):
    '''Generic 'ping' sensor.

    Measures the round-trip time to the configured host and emits it on
    the 'default' stream; on failure it emits nothing.
    '''
    # W0232: Class has no __init__ method
    # R0201: Method could be a function
    # R0903: Too few public methods
    # pylint: disable=W0232,R0201,R0903
    name = 'ping'
    streams = {
        'default': {
            'type': float,
            'description':
                'Time from sending message to destination host '
                'to receiving acknowledgment.',
            'unit': str(unit_reg.second)
        }
    }
    # JSON schema: a single required 'host' string, nothing else allowed
    config_schema = {
        '$schema': 'http://json-schema.org/schema#',
        'type': 'object',
        'properties': {'host': {'type': 'string'}},
        'required': ['host'],
        'additionalProperties': False
    }

    def do_run(self):
        '''Return the ping round-trip time in seconds, or () when it fails.'''
        try:
            delay = float(ping(self.config['host'], 5))
        # Bug fix: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        # TODO: should be TimeOut and some other specific errors
        except Exception:
            return ()
        return (("default", delay), )
|
apache-2.0
|
jlspyaozhongkai/Uter
|
third_party_backup/Python-2.7.9/Lib/json/tests/__init__.py
|
145
|
2452
|
import os
import sys
import json
import doctest
import unittest
from test import test_support
# import json with and without accelerations
cjson = test_support.import_fresh_module('json', fresh=['_json'])
pyjson = test_support.import_fresh_module('json', blocked=['_json'])
# create two base classes that will be used by the other tests
class PyTest(unittest.TestCase):
    """Base class binding the pure-Python json implementation for subclasses."""
    json = pyjson
    loads = staticmethod(pyjson.loads)
    dumps = staticmethod(pyjson.dumps)
@unittest.skipUnless(cjson, 'requires _json')
class CTest(unittest.TestCase):
    """Base class binding the C-accelerated json implementation (when built)."""
    # cjson is None when the _json extension module is unavailable
    if cjson is not None:
        json = cjson
        loads = staticmethod(cjson.loads)
        dumps = staticmethod(cjson.dumps)
# test PyTest and CTest checking if the functions come from the right module
class TestPyTest(PyTest):
    """Check that PyTest really exposes the pure-Python json modules."""
    def test_pyjson(self):
        # Each helper must originate from the json.* pure-Python modules
        self.assertEqual(self.json.scanner.make_scanner.__module__,
                         'json.scanner')
        self.assertEqual(self.json.decoder.scanstring.__module__,
                         'json.decoder')
        self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__,
                         'json.encoder')
class TestCTest(CTest):
    """Check that CTest really exposes the _json C extension helpers."""
    def test_cjson(self):
        # Each helper must originate from the _json C extension
        self.assertEqual(self.json.scanner.make_scanner.__module__, '_json')
        self.assertEqual(self.json.decoder.scanstring.__module__, '_json')
        self.assertEqual(self.json.encoder.c_make_encoder.__module__, '_json')
        self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__,
                         '_json')
# Directory containing the individual test modules scanned by test_suite()
here = os.path.dirname(__file__)
def test_suite():
    """Build the full suite: doctests plus every test*.py module in this dir."""
    suite = additional_tests()
    loader = unittest.TestLoader()
    for fn in os.listdir(here):
        if fn.startswith("test") and fn.endswith(".py"):
            # import "json.tests.<module>" and collect its TestCases
            modname = "json.tests." + fn[:-3]
            __import__(modname)
            module = sys.modules[modname]
            suite.addTests(loader.loadTestsFromModule(module))
    return suite
def additional_tests():
    """Return a suite of json doctests plus the implementation sanity checks."""
    suite = unittest.TestSuite()
    for mod in (json, json.encoder, json.decoder):
        suite.addTest(doctest.DocTestSuite(mod))
    suite.addTest(TestPyTest('test_pyjson'))
    suite.addTest(TestCTest('test_cjson'))
    return suite
def main():
    """Run the complete suite with a text runner (used when executed directly)."""
    suite = test_suite()
    runner = unittest.TextTestRunner()
    runner.run(suite)
if __name__ == '__main__':
    # Make the package importable as 'json.tests' when run from the source tree
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()
|
gpl-3.0
|
UniMOOC/AAClassroom
|
tests/functional/um_asmdates_service.py
|
1
|
2567
|
from datetime import datetime
from datetime import timedelta
from mock import patch
from mock import MagicMock
from functional import actions
from modules.um_assessments.model import AsmDateDAO
from modules.um_assessments.service import AsmDates
START = datetime.utcnow() - timedelta(days=1)
END = datetime.utcnow() + timedelta(days=1)
BEFORE = datetime.utcnow() - timedelta(days=2)
AFTER = datetime.utcnow() + timedelta(days=2)
MOCK_DATE = MagicMock(start_date=START, end_date=END)
DATES_LIST = [
MOCK_DATE
]
class AsmDatesTest(actions.TestBase):
    """Unit tests for the AsmDates service facade over AsmDateDAO.

    NOTE: stacked @patch decorators inject their mocks bottom-up, so the
    innermost decorator corresponds to the first mock argument.
    """
    @patch.object(AsmDateDAO, 'create', return_value=MagicMock())
    def test_create(self, mock_create):
        # create() must forward all arguments plus the trailing default False
        asm_date = AsmDates.create('madrid-02', "1", START, END, 1)
        mock_create.assert_called_with('madrid-02', "1", START, END, 1, False)
        self.assertEquals(asm_date, mock_create.return_value)
    @patch.object(AsmDateDAO, 'get', return_value=MagicMock())
    def test_get(self, mock_get):
        asm_date = AsmDates.get('madrid-02', "1")
        mock_get.assert_called_with('madrid-02', "1")
        self.assertEquals(asm_date, mock_get.return_value)
    @patch.object(AsmDateDAO, 'get', return_value=DATES_LIST)
    def test_get_todays(self, mock_get):
        # MOCK_DATE spans yesterday..tomorrow, so "today" falls inside it
        asm_date = AsmDates.get_todays('madrid-02', "1")
        mock_get.assert_called_with('madrid-02', "1")
        self.assertEquals(asm_date, MOCK_DATE)
    @patch.object(AsmDateDAO, 'get', return_value=DATES_LIST)
    @patch.object(AsmDates, '_utcnow', return_value=BEFORE)
    def test_get_todays_before_date(self, mock_now, mock_get):
        # "now" forced before the window start -> no current date
        asm_date = AsmDates.get_todays('madrid-02', "1")
        mock_get.assert_called_with('madrid-02', "1")
        self.assertIsNone(asm_date)
    @patch.object(AsmDateDAO, 'get', return_value=DATES_LIST)
    @patch.object(AsmDates, '_utcnow', return_value=AFTER)
    def test_get_todays_after_date(self, mock_now, mock_get):
        # "now" forced after the window end -> no current date
        asm_date = AsmDates.get_todays('madrid-02', "1")
        mock_get.assert_called_with('madrid-02', "1")
        self.assertIsNone(asm_date)
    @patch.object(AsmDates, 'get_todays', return_value=MOCK_DATE)
    def test_is_open(self, mock_todays):
        is_open = AsmDates.is_open('madrid-02', "1")
        mock_todays.assert_called_with('madrid-02', "1")
        self.assertTrue(is_open)
    @patch.object(AsmDates, 'get_todays', return_value=None)
    def test_is_not_open(self, mock_todays):
        is_open = AsmDates.is_open('madrid-02', "1")
        mock_todays.assert_called_with('madrid-02', "1")
        self.assertFalse(is_open)
|
apache-2.0
|
Antiun/stock-logistics-workflow
|
stock_ownership_by_move/model/picking.py
|
17
|
2156
|
# -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import models, api
from collections import defaultdict
class Picking(models.Model):
    _inherit = 'stock.picking'
    @api.model
    def _prepare_pack_ops(self, picking, quants, forced_qties):
        """Get the owner from the moves instead of the picking.

        The only case we need to fix is the one of receptions. In that case, we
        do not receive any quants (because there is no quant reservation). We
        group the moves by product and owner, and run the original method
        separately for each one.
        """
        # With reserved quants the standard behaviour is already correct
        if quants:
            return super(Picking, self)._prepare_pack_ops(picking, quants,
                                                          forced_qties)
        grouped = defaultdict(list)
        ops_data = []
        # Group pending (assigned/confirmed) moves by (product, owner)
        for move in picking.move_lines:
            if move.state not in ('assigned', 'confirmed'):
                continue
            grouped[(move.product_id, move.restrict_partner_id)].append(move)
        # Run the original method once per group, then stamp the group's
        # owner onto every generated pack operation
        for product, owner in grouped:
            qty = sum(m.product_qty for m in grouped[(product, owner)])
            op_data = super(Picking, self)._prepare_pack_ops(picking, quants,
                                                             {product: qty})
            for x in op_data:
                x['owner_id'] = owner.id
            ops_data += op_data
        return ops_data
|
agpl-3.0
|
josschne/BabyWomp
|
cocos2d/tools/android-mk-generator/android_mk_generator.py
|
16
|
3306
|
#!/usr/bin/python
import sys
import os
import os.path
import cStringIO
import re
def get_cur_dir():
    """Return the directory this script runs from.

    sys.path[0] is either the script's directory or the script file
    itself; both cases are normalised to a directory.  Returns None
    when sys.path[0] is neither.
    """
    start = sys.path[0]
    if os.path.isfile(start):
        return os.path.dirname(start)
    if os.path.isdir(start):
        return start
CUR_DIR = get_cur_dir()
# Repository root and default config location, resolved relative to this script
COCOS_ROOT = os.path.abspath(os.path.join(CUR_DIR, "../../"))
CONFIG = os.path.abspath(os.path.join(CUR_DIR, "./config.py"))
# print 'CONFIG:' + CONFIG
# print 'COCOS_ROOT:' + COCOS_ROOT
# Fall back to the bundled pylib copy when PathUtils is not on sys.path
# (Python-2 'except ImportError, e' syntax: this script is Python 2 only)
try:
    import PathUtils
except ImportError, e:
    sys.path.append(os.path.abspath(os.path.join(CUR_DIR, "../pylib")))
    import PathUtils
def gen_android_mk(mkfile, pathes, suffix = ("c", "cpp",), exclude = ()):
    """Regenerate the LOCAL_SRC_FILES list inside an Android .mk file.

    mkfile  -- .mk path relative to COCOS_ROOT, rewritten in place
    pathes  -- directories to scan for source files
    suffix  -- file extensions to include
    exclude -- entries to skip (semantics defined by PathUtils.find_files)
    """
    utils = PathUtils.PathUtils(COCOS_ROOT)
    filelst = utils.find_files(pathes, suffix, exclude)
    # generate file list string: one " \"-continued line per source file,
    # with paths made relative to the .mk file and slash-normalised
    filestrio = cStringIO.StringIO()
    mkfilepath = os.path.dirname(os.path.join(COCOS_ROOT, mkfile))
    for filename in filelst:
        filestrio.write(' \\\n')
        filepath = os.path.relpath(filename, mkfilepath)
        filestrio.write(filepath.replace('\\', '/'))
    filestrio.write('\n')
    # read mk file
    file = open(os.path.join(COCOS_ROOT, mkfile))
    mkstrio = cStringIO.StringIO()
    rep = re.compile("\s*LOCAL_SRC_FILES\s*:=")
    try:
        # copy lines verbatim until the LOCAL_SRC_FILES assignment
        for line in file:
            if rep.match(line):
                mkstrio.write("LOCAL_SRC_FILES :=")
                break
            else:
                mkstrio.write(line)
        #mkstrio.write('\n')
        # write file list
        mkstrio.write(filestrio.getvalue())
        # skip the old continuation lines of the assignment, keep the rest.
        # NOTE(review): 'line' is unbound here if the .mk file is empty --
        # presumably never the case for these generated files; verify.
        delete = True if line[len(line) - 2] == '\\' else False
        for line in file:
            if delete:
                delete = True if line[len(line) - 2] == '\\' else False
            else:
                mkstrio.write(line)
        #mkstrio.write('\n')
    finally:
        file.close()
    # write the rebuilt content back over the original file
    file = open(os.path.join(COCOS_ROOT, mkfile), "w")
    file.write(mkstrio.getvalue())
    file.close()
    filestrio.close()
    mkstrio.close()
def main():
    """Read the config file and regenerate every listed Android .mk file."""
    config = open(CONFIG)
    # NOTE(review): eval() on the config file executes arbitrary code;
    # acceptable only because config.py is a trusted in-repo file.
    params = eval(config.read())
    config.close()
    for param in params:
        gen_android_mk(**param)
if __name__ == "__main__":
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-c', '--config',
                      type='string',
                      dest='config',
                      help="config file path.")
    parser.add_option('-r', '--rootpath',
                      action='store',
                      dest='rootpath',
                      help='class root path for mkfile, pathes, exclude.')
    options, args = parser.parse_args()
    # Command-line values override the module-level defaults
    if options.config:
        CONFIG = os.path.abspath(os.path.join(os.curdir, options.config))
    if options.rootpath:
        COCOS_ROOT = os.path.abspath(os.path.join(os.curdir, options.rootpath))
    # print 'CONFIG:', CONFIG
    # print 'COCOS_ROOT:', COCOS_ROOT
    # Validate both paths before doing any work
    error = ''
    if not os.path.isfile(CONFIG):
        error+='config must be file.\n'
    if not os.path.isdir(COCOS_ROOT):
        error+='rootpath must be directory.\n'
    if error != '':
        parser.exit(2, "{exception}".format(exception=error))
    sys.exit(main())
|
mit
|
jeffreywolf/afproj
|
afproj.py
|
1
|
6530
|
#! /usr/bin/env python
"""
Affine spatial transformation
"""
import numpy as np
from numpy.linalg import inv
import argparse, sys, csv, os, time
def getArgs():
    """Build and parse the command line for the affine transformation tool."""
    parser = argparse.ArgumentParser(
        description = """Affine spatial transformation"""
    )
    # (short flag, long flag, required, help text) for all string options
    string_options = [
        ("-c", "--controlPoints", True,
         "Control points csv file. See README doc for formating instructions."),
        ("-u", "--unprojectPoints", True,
         "Unprojected points on plot coordinate system"),
        ("-x", "--xname", True,
         "field name for x-coordinate in unprojected points file"),
        ("-y", "--yname", True,
         "field name for y-coordinate in unprojected points file"),
        ("-i", "--uid", True,
         "field name for unique identifier of point in unprojected points file"),
        ("-n", "--nsims", False,
         """Number of simulations"""),
        ("-o", "--output", True,
         """Output file prefix"""),
    ]
    for short_flag, long_flag, required, help_text in string_options:
        parser.add_argument(
            short_flag,
            long_flag,
            type = str,
            required = required,
            help = help_text
        )
    parser.add_argument(
        "-v",
        "--verbose",
        action = "store_true",
        help = "Print status updates while executing"
    )
    return parser.parse_args()
def getIndex(header, item):
    """Return the position of *item* in *header* (case-insensitive), or None."""
    target = item.lower()
    for position, name in enumerate(header):
        if name.lower() == target:
            return position
    return None
def getData(path, fields):
    """Read unprojected points file as numpy array.

    path   -- csv file with a header row
    fields -- column names to extract, matched case-insensitively
    Return :: (list of matched header names, float64 array of the rows)
    NOTE(review): the 'rUb' open mode is Python-2 only; this script uses
    Python-2 print statements elsewhere, so it targets Python 2.
    """
    data = []
    with open(path, 'rUb') as f:
        indata = csv.reader(f)
        var_indices = []  # unused; kept as in original
        for i, line in enumerate(indata):
            if i == 0:
                # header row determines which columns to extract
                header = line
                indices = [getIndex(header, item) for item in fields]
                continue
            row = [line[index] for index in indices]
            data.append(row)
    data = np.array(data, dtype = np.float64)
    header = [header[index] for index in indices]
    return header, data
def getControl(path):
    """Read control points file as numpy array.

    Expects a header row followed by comma-separated numeric rows.
    Return :: (header list, float64 array)
    NOTE(review): mode 'rU' was removed in Python 3.11; Python-2 script.
    """
    data = []
    with open(path, "rU") as f:
        for i, line in enumerate(f):
            ls = line.strip().split(',')
            if i == 0:
                header = ls
                continue
            data.append(ls)
    data = np.array(data, dtype = np.float64)
    return header, data
def update(line):
    """Coerce a row for output: uid column (and the simulation index in
    6-column rows) to int, all remaining values to float."""
    width = len(line)
    converted = []
    for position, value in enumerate(line):
        # column 0 is always the uid; column 1 is the sim counter only
        # when the row has six columns
        if position == 0 or (position == 1 and width == 6):
            converted.append(int(value))
        else:
            converted.append(float(value))
    return converted
def writeOut(data, header, filename, verbose):
    """Write data to a file.

    When *header* is given the file is created/truncated and the header
    row written first; when it is None rows are appended instead.  Each
    row passes through update() to restore the integer id columns.
    (Python-2 print statements below.)
    """
    if verbose:
        print "\nInitializing {0} file output".format(filename)
    if header is not None:
        with open(filename, "w") as f:
            header_str = ",".join(header)+"\n"
            f.write(header_str)
            for line in data:
                line = update(line)
                row = ",".join([str(elem) for elem in line])+"\n"
                f.write(row)
    else:
        with open(filename, "a") as f:
            for line in data:
                line = update(line)
                row = ",".join([str(elem) for elem in line])+"\n"
                f.write(row)
    if verbose:
        print "Wrote {0} to disk\n".format(filename)
def fit(X, y):
    """Solve the least-squares normal equations for y ~ X B.

    X -- (n, p) design matrix
    y -- (n,) response vector
    Return :: (p,) coefficient vector B

    Improvement: solves (X'X) B = X'y with np.linalg.solve instead of
    forming the explicit inverse, which is cheaper and numerically more
    stable for the same result.
    """
    return np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
def affine_parameterization(utm_e, utm_n, x, y):
    """Fit the six affine coefficients mapping (x, y) -> (utm_e, utm_n).

    Builds the design matrix [x, y, 1] once and solves a least-squares fit
    per output axis.  Returns (affine_x, affine_y) coefficient vectors.
    """
    design = np.column_stack((x, y, np.ones(len(x))))
    return fit(design, utm_e), fit(design, utm_n)
def affine_transformation(X_unprj, affine_x, affine_y, args, header):
    """Apply fitted affine coefficients to the design matrix *X_unprj*.

    *args* and *header* are accepted only for call-site compatibility;
    they are not used.  Returns (x_pred, y_pred).
    """
    return X_unprj.dot(affine_x), X_unprj.dot(affine_y)
def main():
    """Project unprojected point coordinates into UTM via an affine fit to
    control points, optionally Monte-Carlo simulating control-point error.
    """
    t_i = time.time()
    args = getArgs()
    initDir = os.getcwd()  # NOTE(review): unused — candidate for removal
    # Column names to extract from the unprojected points file.
    fields = [
        args.uid,
        args.xname,
        args.yname
    ]
    # Fixed seed so simulated realizations are reproducible across runs.
    np.random.seed(10)
    header, data = getData(args.unprojectPoints, fields)
    cp_header, cp = getControl(args.controlPoints)
    # Determine whether or not to run simulation
    # (6 columns = utm_e, utm_n, x, y plus standard errors; 4 = no errors).
    if cp.shape[1] == 6 and args.nsims is not None:
        sim = True
        if args.verbose:
            print "Will simulate {} realizations of corners.".format(args.nsims)
    elif args.nsims is None:
        sim = False
    elif cp.shape[1] == 4:
        sim = False
        if args.nsims is not None:
            if args.verbose:
                print "Cannot simulate error because no utm_e and utm_n se's"
                print "are included in control points file."
    else:
        print "Incorrect dimensions for control points file. Exiting."
        sys.exit(1)
    # Affine Spatial Transformation Parameterization
    # x' = Ax + By + C
    # y' = Dx + Ey + F
    utm_e, utm_n = cp[:,0].flatten(), cp[:,1].flatten()
    x, y = cp[:,2].flatten(), cp[:,3].flatten()
    affine_x, affine_y = affine_parameterization(utm_e, utm_n, x, y )
    uids = data[:, getIndex(header, args.uid)].flatten()
    gx = data[:, getIndex(header, args.xname)].flatten()
    gy = data[:, getIndex(header, args.yname)].flatten()
    # Design matrix [gx, gy, 1] for all unprojected points.
    X_unprj = np.column_stack(
        (
            gx,
            gy,
            np.ones(len(gx))
        )
    )
    x_pred, y_pred = affine_transformation(X_unprj, affine_x, affine_y, args, header)
    projected_data = np.column_stack(
        (
            uids,
            gx,
            gy,
            x_pred,
            y_pred
        )
    )
    projected_data_header = ["uid", "gx", "gy", cp_header[0], cp_header[1]]
    writeOut(
        projected_data,
        projected_data_header,
        args.output+".csv",
        args.verbose
    )
    # Simulated locations
    if sim:
        # NOTE(review): utm_se_e / utm_se_n are unused below — the loop reads
        # the SE columns directly via row[4] / row[5].
        utm_se_e = cp[:,4]
        utm_se_n = cp[:,5]
        sim_data_header = ["iter", "uid", "gx", "gy", cp_header[0], cp_header[1]]
        for i in range(int(args.nsims)):
            if args.verbose:
                print "Simulation number {}".format(i+1)
            # Draw one normal realization of each control point from its SEs,
            # then refit the affine transform against those perturbed corners.
            evec = np.zeros(cp.shape[0])
            nvec = np.zeros(cp.shape[0])
            for j, row in enumerate(cp):
                evec[j] = np.random.normal(row[0],row[4], 1)
                nvec[j] = np.random.normal(row[1],row[5], 1)
            affine_x, affine_y = affine_parameterization(evec, nvec, x, y)
            x_pred, y_pred = affine_transformation(
                X_unprj,
                affine_x,
                affine_y,
                args,
                header
            )
            iteration = i + np.zeros(x_pred.shape[0])
            sim_data = np.column_stack(
                (
                    iteration,
                    uids,
                    gx,
                    gy,
                    x_pred,
                    y_pred
                )
            )
            # First iteration creates the file with a header; later ones append.
            if i == 0:
                writeOut(
                    sim_data,
                    sim_data_header,
                    args.output+"-sim.csv",
                    args.verbose
                )
            else:
                writeOut(
                    sim_data,
                    None,
                    args.output+"-sim.csv",
                    args.verbose
                )
    t_f = time.time()
    if args.verbose:
        print "Spatially adjusted data in {} seconds".format(t_f - t_i)
if __name__ == "__main__":
    main()
|
mit
|
TieWei/nova
|
nova/scheduler/filters/image_props_filter.py
|
9
|
3909
|
# Copyright (c) 2011-2012 OpenStack Foundation
# Copyright (c) 2012 Canonical Ltd
# Copyright (c) 2012 SUSE LINUX Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class ImagePropertiesFilter(filters.BaseHostFilter):
    """Filter compute nodes that satisfy instance image properties.

    The ImagePropertiesFilter filters compute nodes that satisfy
    any architecture, hypervisor type, or virtual machine mode properties
    specified on the instance's image properties.  Image properties are
    contained in the image dictionary in the request_spec.
    """

    # Image Properties and Compute Capabilities do not change within
    # a request
    run_filter_once_per_request = True

    def _instance_supported(self, host_state, image_props):
        """Return True when the host advertises support for every
        compute-related property (architecture, hypervisor_type, vm_mode)
        that the image specifies."""
        img_arch = image_props.get('architecture', None)
        img_h_type = image_props.get('hypervisor_type', None)
        img_vm_mode = image_props.get('vm_mode', None)
        checked_img_props = (img_arch, img_h_type, img_vm_mode)

        # Supported if no compute-related instance properties are specified
        if not any(checked_img_props):
            return True

        supp_instances = host_state.supported_instances
        # Not supported if an instance property is requested but nothing
        # advertised by the host.
        if not supp_instances:
            LOG.debug(_("Instance contains properties %(image_props)s, "
                        "but no corresponding supported_instances are "
                        "advertised by the compute node"),
                      {'image_props': image_props})
            return False

        def _compare_props(props, other_props):
            # Every non-None requested property must appear in the host's
            # advertised (arch, hv_type, vm_mode) tuple.
            for i in props:
                if i and i not in other_props:
                    return False
            return True

        for supp_inst in supp_instances:
            if _compare_props(checked_img_props, supp_inst):
                # Bug fix: the original concatenated the two string pieces
                # without a space ("supported_instances%(supp_instances)s").
                LOG.debug(_("Instance properties %(image_props)s "
                            "are satisfied by compute host supported_instances "
                            "%(supp_instances)s"),
                          {'image_props': image_props,
                           'supp_instances': supp_instances})
                return True
        LOG.debug(_("Instance contains properties %(image_props)s "
                    "that are not provided by the compute node "
                    "supported_instances %(supp_instances)s"),
                  {'image_props': image_props,
                   'supp_instances': supp_instances})
        return False

    def host_passes(self, host_state, filter_properties):
        """Check if host passes specified image properties.

        Returns True for compute nodes that satisfy image properties
        contained in the request_spec.
        """
        spec = filter_properties.get('request_spec', {})
        image_props = spec.get('image', {}).get('properties', {})

        if not self._instance_supported(host_state, image_props):
            LOG.debug(_("%(host_state)s does not support requested "
                        "instance_properties"), {'host_state': host_state})
            return False
        return True
|
apache-2.0
|
tdtrask/ansible
|
lib/ansible/modules/network/cloudengine/ce_vxlan_arp.py
|
22
|
24179
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_vxlan_arp
version_added: "2.4"
short_description: Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
description:
- Manages ARP attributes of VXLAN on HUAWEI CloudEngine devices.
author: QijunPan (@CloudEngine-Ansible)
options:
evn_bgp:
description:
- Enables EVN BGP.
required: false
choices: ['enable', 'disable']
default: null
evn_source_ip:
description:
- Specifies the source address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_peer_ip:
description:
- Specifies the IP address of an EVN BGP peer.
The value is in dotted decimal notation.
required: false
default: null
evn_server:
description:
- Configures the local device as the router reflector (RR) on the EVN network.
required: false
choices: ['enable', 'disable']
default: null
evn_reflect_client:
description:
- Configures the local device as the route reflector (RR) and its peer as the client.
required: false
choices: ['enable', 'disable']
default: null
vbdif_name:
description:
- Full name of VBDIF interface, i.e. Vbdif100.
required: false
default: null
arp_collect_host:
description:
- Enables EVN BGP or BGP EVPN to collect host information.
required: false
choices: ['enable', 'disable']
default: null
host_collect_protocol:
description:
- Enables EVN BGP or BGP EVPN to advertise host information.
required: false
choices: ['bgp','none']
default: null
bridge_domain_id:
description:
- Specifies a BD(bridge domain) ID.
The value is an integer ranging from 1 to 16777215.
required: false
default: null
arp_suppress:
description:
- Enables ARP broadcast suppression in a BD.
required: false
choices: ['enable', 'disable']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
- name: vxlan arp module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure EVN BGP on Layer 2 and Layer 3 VXLAN gateways to establish EVN BGP peer relationships.
ce_vxlan_arp:
evn_bgp: enable
evn_source_ip: 6.6.6.6
evn_peer_ip: 7.7.7.7
provider: "{{ cli }}"
- name: Configure a Layer 3 VXLAN gateway as a BGP RR.
ce_vxlan_arp:
evn_bgp: enable
evn_server: enable
provider: "{{ cli }}"
- name: Enable EVN BGP on a Layer 3 VXLAN gateway to collect host information.
ce_vxlan_arp:
vbdif_name: Vbdif100
arp_collect_host: enable
provider: "{{ cli }}"
- name: Enable Layer 2 and Layer 3 VXLAN gateways to use EVN BGP to advertise host information.
ce_vxlan_arp:
host_collect_protocol: bgp
provider: "{{ cli }}"
- name: Enable ARP broadcast suppression on a Layer 2 VXLAN gateway.
ce_vxlan_arp:
bridge_domain_id: 100
arp_suppress: enable
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip":"7.7.7.7", state: "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"evn_bgp": "disable", "evn_source_ip": null, "evn_peer_ip": []}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"evn_bgp": "enable", "evn_source_ip": "6.6.6.6", "evn_peer_ip": ["7.7.7.7"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evn bgp",
"source-address 6.6.6.6",
"peer 7.7.7.7"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when *test_cfg* appears as a substring of *cmp_cfg*.

    Falsy arguments (None or empty string) always yield False.
    """
    if cmp_cfg and test_cfg:
        return test_cfg in cmp_cfg
    return False
def is_valid_v4addr(addr):
    """Check whether *addr* is a well-formed dotted-quad IPv4 address."""
    if addr.count('.') != 3:
        return False
    octets = addr.split('.')
    if len(octets) != 4:
        return False
    # Each piece must be a non-negative integer no greater than 255.
    return all(part.isdigit() and int(part) <= 255 for part in octets)
def get_evn_peers(config):
    """Return the list of unique EVN BGP peer IPs found in *config*.

    Returns None when no ``peer`` statement is present.  Order of the
    returned list is unspecified (it passes through a set).
    """
    # Bug fix: dots are escaped so '.' matches only a literal dot; the
    # original pattern's bare '.' matched any character.
    get = re.findall(r"peer (\d+\.\d+\.\d+\.\d+)", config)
    if not get:
        return None
    return list(set(get))
def get_evn_srouce(config):
    """Return the EVN BGP ``source-address`` IP from *config*, or None.

    (Name intentionally kept, typo and all, for caller compatibility.)
    """
    # Bug fix: escape the dots so they match literal '.' characters only.
    get = re.findall(
        r"source-address (\d+\.\d+\.\d+\.\d+)", config)
    if not get:
        return None
    return get[0]
def get_evn_reflect_client(config):
    """Return the list of peer IPs configured as reflect-client, or None."""
    # Bug fix: escape the dots so they match literal '.' characters only.
    get = re.findall(
        r"peer (\d+\.\d+\.\d+\.\d+)\s*reflect-client", config)
    if not get:
        return None
    return list(get)
class VxlanArp(object):
    """
    Manages arp attributes of VXLAN.

    Reads the device's current configuration, computes the CLI commands
    needed to reach the requested state, applies them, and reports
    proposed/existing/end_state dictionaries in Ansible's usual shape.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.evn_bgp = self.module.params['evn_bgp']
        self.evn_source_ip = self.module.params['evn_source_ip']
        self.evn_peer_ip = self.module.params['evn_peer_ip']
        self.evn_server = self.module.params['evn_server']
        self.evn_reflect_client = self.module.params['evn_reflect_client']
        self.vbdif_name = self.module.params['vbdif_name']
        self.arp_collect_host = self.module.params['arp_collect_host']
        self.host_collect_protocol = self.module.params[
            'host_collect_protocol']
        self.bridge_domain_id = self.module.params['bridge_domain_id']
        self.arp_suppress = self.module.params['arp_suppress']
        self.state = self.module.params['state']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.config = ""  # current config
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def init_module(self):
        """init module"""
        # vbdif_name/arp_collect_host and bridge_domain_id/arp_suppress
        # only make sense as pairs.
        required_together = [("vbdif_name", "arp_collect_host"), ("bridge_domain_id", "arp_suppress")]
        self.module = AnsibleModule(argument_spec=self.spec,
                                    required_together=required_together,
                                    supports_check_mode=True)

    def cli_load_config(self, commands):
        """load config by cli"""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def get_current_config(self):
        """get current configuration, restricted to the sections this
        module cares about"""
        flags = list()
        exp = "| ignore-case section include evn bgp|host collect protocol bgp"
        if self.vbdif_name:
            exp += "|^interface %s$" % self.vbdif_name
        if self.bridge_domain_id:
            exp += "|^bridge-domain %s$" % self.bridge_domain_id
        flags.append(exp)
        config = get_config(self.module, flags)
        return config

    def cli_add_command(self, command, undo=False):
        """add command to self.update_cmd and self.commands"""
        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command
        self.commands.append(cmd)          # set to device
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)   # show updates result

    def config_bridge_domain(self):
        """manage bridge domain configuration"""
        if not self.bridge_domain_id:
            return

        # bridge-domain bd-id
        # [undo] arp broadcast-suppress enable
        cmd = "bridge-domain %s" % self.bridge_domain_id
        if not is_config_exist(self.config, cmd):
            self.module.fail_json(msg="Error: Bridge domain %s is not exist." % self.bridge_domain_id)

        cmd = "arp broadcast-suppress enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_suppress == "enable" and not exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_suppress == "disable" and exist:
            self.cli_add_command("bridge-domain %s" % self.bridge_domain_id)
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_evn_bgp(self):
        """enables EVN BGP and configure evn bgp command"""
        evn_bgp_view = False
        evn_bgp_enable = False

        cmd = "evn bgp"
        exist = is_config_exist(self.config, cmd)
        if self.evn_bgp == "enable" or exist:
            evn_bgp_enable = True

        # [undo] evn bgp
        if self.evn_bgp:
            if self.evn_bgp == "enable" and not exist:
                self.cli_add_command(cmd)
                evn_bgp_view = True
            elif self.evn_bgp == "disable" and exist:
                self.cli_add_command(cmd, undo=True)
                # disabling evn bgp removes its sub-config; nothing else to do
                return

        # [undo] source-address ip-address
        if evn_bgp_enable and self.evn_source_ip:
            cmd = "source-address %s" % self.evn_source_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.state == "absent" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        # [undo] peer ip-address
        # [undo] peer ipv4-address reflect-client
        if evn_bgp_enable and self.evn_peer_ip:
            cmd = "peer %s" % self.evn_peer_ip
            exist = is_config_exist(self.config, cmd)
            if self.state == "present":
                if not exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd)
                    if self.evn_reflect_client == "enable":
                        self.cli_add_command(
                            "peer %s reflect-client" % self.evn_peer_ip)
                else:
                    if self.evn_reflect_client:
                        cmd = "peer %s reflect-client" % self.evn_peer_ip
                        exist = is_config_exist(self.config, cmd)
                        if self.evn_reflect_client == "enable" and not exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd)
                        elif self.evn_reflect_client == "disable" and exist:
                            if not evn_bgp_view:
                                self.cli_add_command("evn bgp")
                                evn_bgp_view = True
                            self.cli_add_command(cmd, undo=True)
            else:
                if exist:
                    if not evn_bgp_view:
                        self.cli_add_command("evn bgp")
                        evn_bgp_view = True
                    self.cli_add_command(cmd, undo=True)

        # [undo] server enable
        if evn_bgp_enable and self.evn_server:
            cmd = "server enable"
            exist = is_config_exist(self.config, cmd)
            if self.evn_server == "enable" and not exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd)
            elif self.evn_server == "disable" and exist:
                if not evn_bgp_view:
                    self.cli_add_command("evn bgp")
                    evn_bgp_view = True
                self.cli_add_command(cmd, undo=True)

        if evn_bgp_view:
            self.cli_add_command("quit")

    def config_vbdif(self):
        """configure command at the VBDIF interface view"""
        # interface vbdif bd-id
        # [undo] arp collect host enable
        cmd = "interface %s" % self.vbdif_name.lower().capitalize()
        exist = is_config_exist(self.config, cmd)
        if not exist:
            self.module.fail_json(
                msg="Error: Interface %s does not exist." % self.vbdif_name)

        cmd = "arp collect host enable"
        exist = is_config_exist(self.config, cmd)
        if self.arp_collect_host == "enable" and not exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd)
            self.cli_add_command("quit")
        elif self.arp_collect_host == "disable" and exist:
            self.cli_add_command("interface %s" %
                                 self.vbdif_name.lower().capitalize())
            self.cli_add_command(cmd, undo=True)
            self.cli_add_command("quit")

    def config_host_collect_protocal(self):
        """Enable EVN BGP or BGP EVPN to advertise host information"""
        # [undo] host collect protocol bgp
        cmd = "host collect protocol bgp"
        exist = is_config_exist(self.config, cmd)
        if self.state == "present":
            if self.host_collect_protocol == "bgp" and not exist:
                self.cli_add_command(cmd)
            elif self.host_collect_protocol == "none" and exist:
                self.cli_add_command(cmd, undo=True)
        else:
            if self.host_collect_protocol == "bgp" and exist:
                self.cli_add_command(cmd, undo=True)

    def is_valid_vbdif(self, ifname):
        """check is interface vbdif is valid"""
        if not ifname.upper().startswith('VBDIF'):
            return False
        bdid = self.vbdif_name.replace(" ", "").upper().replace("VBDIF", "")
        if not bdid.isdigit():
            return False
        if int(bdid) < 1 or int(bdid) > 16777215:
            return False
        return True

    def check_params(self):
        """Check all input params"""
        # bridge domain id check
        if self.bridge_domain_id:
            if not self.bridge_domain_id.isdigit():
                self.module.fail_json(
                    msg="Error: Bridge domain id is not digit.")
            if int(self.bridge_domain_id) < 1 or int(self.bridge_domain_id) > 16777215:
                self.module.fail_json(
                    msg="Error: Bridge domain id is not in the range from 1 to 16777215.")

        # evn_source_ip check
        if self.evn_source_ip:
            if not is_valid_v4addr(self.evn_source_ip):
                self.module.fail_json(msg="Error: evn_source_ip is invalid.")

        # evn_peer_ip check
        if self.evn_peer_ip:
            if not is_valid_v4addr(self.evn_peer_ip):
                self.module.fail_json(msg="Error: evn_peer_ip is invalid.")

        # vbdif_name check
        if self.vbdif_name:
            self.vbdif_name = self.vbdif_name.replace(
                " ", "").lower().capitalize()
            if not self.is_valid_vbdif(self.vbdif_name):
                self.module.fail_json(msg="Error: vbdif_name is invalid.")

        # evn_reflect_client and evn_peer_ip must set at the same time
        if self.evn_reflect_client and not self.evn_peer_ip:
            self.module.fail_json(
                msg="Error: evn_reflect_client and evn_peer_ip must set at the same time.")

        # evn_server and evn_reflect_client can not set at the same time
        if self.evn_server == "enable" and self.evn_reflect_client == "enable":
            self.module.fail_json(
                msg="Error: evn_server and evn_reflect_client can not set at the same time.")

    def get_proposed(self):
        """get proposed info"""
        if self.evn_bgp:
            self.proposed["evn_bgp"] = self.evn_bgp
        if self.evn_source_ip:
            self.proposed["evn_source_ip"] = self.evn_source_ip
        if self.evn_peer_ip:
            self.proposed["evn_peer_ip"] = self.evn_peer_ip
        if self.evn_server:
            self.proposed["evn_server"] = self.evn_server
        if self.evn_reflect_client:
            self.proposed["evn_reflect_client"] = self.evn_reflect_client
        if self.arp_collect_host:
            self.proposed["arp_collect_host"] = self.arp_collect_host
        if self.host_collect_protocol:
            self.proposed["host_collect_protocol"] = self.host_collect_protocol
        if self.arp_suppress:
            self.proposed["arp_suppress"] = self.arp_suppress
        if self.vbdif_name:
            # Bug fix: the original assigned self.evn_peer_ip here, so the
            # proposed dict reported the wrong value for vbdif_name.
            self.proposed["vbdif_name"] = self.vbdif_name
        if self.bridge_domain_id:
            self.proposed["bridge_domain_id"] = self.bridge_domain_id
        self.proposed["state"] = self.state

    def get_existing(self):
        """get existing info"""
        evn_bgp_exist = is_config_exist(self.config, "evn bgp")
        if evn_bgp_exist:
            self.existing["evn_bgp"] = "enable"
        else:
            self.existing["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(self.config, "server enable"):
                self.existing["evn_server"] = "enable"
            else:
                self.existing["evn_server"] = "disable"

            self.existing["evn_source_ip"] = get_evn_srouce(self.config)
            self.existing["evn_peer_ip"] = get_evn_peers(self.config)
            self.existing["evn_reflect_client"] = get_evn_reflect_client(
                self.config)

        # Bug fix: the original stored this under "host_collect_protocol",
        # which the very next statement overwrote, losing the value.
        if is_config_exist(self.config, "arp collect host enable"):
            self.existing["arp_collect_host"] = "enable"
        else:
            self.existing["arp_collect_host"] = "disable"

        if is_config_exist(self.config, "host collect protocol bgp"):
            self.existing["host_collect_protocol"] = "bgp"
        else:
            self.existing["host_collect_protocol"] = None

        if is_config_exist(self.config, "arp broadcast-suppress enable"):
            self.existing["arp_suppress"] = "enable"
        else:
            self.existing["arp_suppress"] = "disable"

    def get_end_state(self):
        """get end state info"""
        config = self.get_current_config()

        evn_bgp_exist = is_config_exist(config, "evn bgp")
        if evn_bgp_exist:
            self.end_state["evn_bgp"] = "enable"
        else:
            self.end_state["evn_bgp"] = "disable"

        if evn_bgp_exist:
            if is_config_exist(config, "server enable"):
                self.end_state["evn_server"] = "enable"
            else:
                self.end_state["evn_server"] = "disable"

            self.end_state["evn_source_ip"] = get_evn_srouce(config)
            self.end_state["evn_peer_ip"] = get_evn_peers(config)
            self.end_state[
                "evn_reflect_client"] = get_evn_reflect_client(config)

        # Bug fix: same key clobbering as in get_existing().
        if is_config_exist(config, "arp collect host enable"):
            self.end_state["arp_collect_host"] = "enable"
        else:
            self.end_state["arp_collect_host"] = "disable"

        if is_config_exist(config, "host collect protocol bgp"):
            self.end_state["host_collect_protocol"] = "bgp"
        else:
            self.end_state["host_collect_protocol"] = None

        if is_config_exist(config, "arp broadcast-suppress enable"):
            self.end_state["arp_suppress"] = "enable"
        else:
            self.end_state["arp_suppress"] = "disable"

    def work(self):
        """worker"""
        self.check_params()
        self.config = self.get_current_config()
        self.get_existing()
        self.get_proposed()

        # deal present or absent
        if self.evn_bgp or self.evn_server or self.evn_peer_ip or self.evn_source_ip:
            self.config_evn_bgp()

        if self.vbdif_name and self.arp_collect_host:
            self.config_vbdif()

        if self.host_collect_protocol:
            self.config_host_collect_protocal()

        if self.bridge_domain_id and self.arp_suppress:
            self.config_bridge_domain()

        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Module main: build the argument spec, merge in the common
    CloudEngine connection options, and run the VxlanArp worker."""
    argument_spec = dict(
        evn_bgp=dict(required=False, type='str',
                     choices=['enable', 'disable']),
        evn_source_ip=dict(required=False, type='str'),
        evn_peer_ip=dict(required=False, type='str'),
        evn_server=dict(required=False, type='str',
                        choices=['enable', 'disable']),
        evn_reflect_client=dict(
            required=False, type='str', choices=['enable', 'disable']),
        vbdif_name=dict(required=False, type='str'),
        arp_collect_host=dict(required=False, type='str',
                              choices=['enable', 'disable']),
        host_collect_protocol=dict(
            required=False, type='str', choices=['bgp', 'none']),
        bridge_domain_id=dict(required=False, type='str'),
        arp_suppress=dict(required=False, type='str',
                          choices=['enable', 'disable']),
        state=dict(required=False, default='present',
                   choices=['present', 'absent'])
    )
    # Shared connection/auth parameters (host, username, port, ...).
    argument_spec.update(ce_argument_spec)
    module = VxlanArp(argument_spec)
    module.work()
if __name__ == '__main__':
    main()
|
gpl-3.0
|
raildo/keystone
|
keystone/token/backends/sql.py
|
5
|
7517
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from keystone.common import sql
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import token
class TokenModel(sql.ModelBase, sql.DictBase):
    """SQLAlchemy model for a keystone token row."""
    __tablename__ = 'token'
    # Attributes exposed through the DictBase to_dict/from_dict machinery.
    attributes = ['id', 'expires', 'user_id', 'trust_id']
    id = sql.Column(sql.String(64), primary_key=True)
    expires = sql.Column(sql.DateTime(), default=None)
    extra = sql.Column(sql.JsonBlob())
    # Revocation is a soft delete: revoked tokens have valid=False.
    valid = sql.Column(sql.Boolean(), default=True, nullable=False)
    user_id = sql.Column(sql.String(64))
    trust_id = sql.Column(sql.String(64))
    # Indexes support the expiry-window and revocation-list queries below.
    __table_args__ = (
        sql.Index('ix_token_expires', 'expires'),
        sql.Index('ix_token_valid', 'valid')
    )
class Token(sql.Base, token.Driver):
    """SQL-backed token driver: CRUD plus list/revoke queries over TokenModel."""
    # Public interface
    def get_token(self, token_id):
        """Return the token dict for *token_id*.

        Raises TokenNotFound for a missing, None, or revoked (valid=False)
        token id.
        """
        if token_id is None:
            raise exception.TokenNotFound(token_id=token_id)
        session = self.get_session()
        token_ref = session.query(TokenModel).get(token_id)
        if not token_ref or not token_ref.valid:
            raise exception.TokenNotFound(token_id=token_id)
        return token_ref.to_dict()
    def create_token(self, token_id, data):
        """Persist a new valid token; fills in default expiry and user_id
        (from data['user']['id']) when absent.  Returns the stored dict."""
        data_copy = copy.deepcopy(data)
        if not data_copy.get('expires'):
            data_copy['expires'] = token.default_expire_time()
        if not data_copy.get('user_id'):
            data_copy['user_id'] = data_copy['user']['id']
        token_ref = TokenModel.from_dict(data_copy)
        token_ref.valid = True
        session = self.get_session()
        with session.begin():
            session.add(token_ref)
            session.flush()
        return token_ref.to_dict()
    def delete_token(self, token_id):
        """Soft-delete (revoke) a token by setting valid=False.

        Raises TokenNotFound if the token is missing or already revoked.
        """
        session = self.get_session()
        with session.begin():
            token_ref = session.query(TokenModel).get(token_id)
            if not token_ref or not token_ref.valid:
                raise exception.TokenNotFound(token_id=token_id)
            token_ref.valid = False
            session.flush()
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes all tokens in one session
        The user_id will be ignored if the trust_id is specified. user_id
        will always be specified.
        If using a trust, the token's user_id is set to the trustee's user ID
        or the trustor's user ID, so will use trust_id to query the tokens.
        """
        session = self.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            # Only unexpired, still-valid tokens need revoking.
            query = query.filter_by(valid=True)
            query = query.filter(TokenModel.expires > now)
            if trust_id:
                query = query.filter(TokenModel.trust_id == trust_id)
            else:
                query = query.filter(TokenModel.user_id == user_id)

            for token_ref in query.all():
                # tenant/consumer filtering is done in Python because both
                # live inside the serialized token payload, not in columns.
                if tenant_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._tenant_matches(tenant_id, token_ref_dict):
                        continue
                if consumer_id:
                    token_ref_dict = token_ref.to_dict()
                    if not self._consumer_matches(consumer_id, token_ref_dict):
                        continue

                token_ref.valid = False
            session.flush()
    def _tenant_matches(self, tenant_id, token_ref_dict):
        """True when tenant_id is None or equals the token's tenant id."""
        return ((tenant_id is None) or
                (token_ref_dict.get('tenant') and
                 token_ref_dict['tenant'].get('id') == tenant_id))
    def _consumer_matches(self, consumer_id, ref):
        """True when consumer_id is None or matches the token's OS-OAUTH1
        consumer_id; missing keys count as no match."""
        if consumer_id is None:
            return True
        else:
            try:
                oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
                return oauth and oauth['consumer_id'] == consumer_id
            except KeyError:
                return False
    def _list_tokens_for_trust(self, trust_id):
        """Return ids of valid, unexpired tokens issued under *trust_id*."""
        session = self.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.trust_id == trust_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            tokens.append(token_ref_dict['id'])
        return tokens
    def _list_tokens_for_user(self, user_id, tenant_id=None):
        """Return ids of valid, unexpired tokens for *user_id*, optionally
        restricted to *tenant_id*."""
        session = self.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        query = query.filter(TokenModel.user_id == user_id)

        token_references = query.filter_by(valid=True)
        for token_ref in token_references:
            token_ref_dict = token_ref.to_dict()
            if self._tenant_matches(tenant_id, token_ref_dict):
                tokens.append(token_ref['id'])
        return tokens
    def _list_tokens_for_consumer(self, user_id, consumer_id):
        """Return ids of valid, unexpired tokens for *user_id* bound to the
        OAuth consumer *consumer_id*."""
        tokens = []
        session = self.get_session()
        with session.begin():
            now = timeutils.utcnow()
            query = session.query(TokenModel)
            query = query.filter(TokenModel.expires > now)
            query = query.filter(TokenModel.user_id == user_id)
            token_references = query.filter_by(valid=True)

            for token_ref in token_references:
                token_ref_dict = token_ref.to_dict()
                if self._consumer_matches(consumer_id, token_ref_dict):
                    tokens.append(token_ref_dict['id'])
            session.flush()
        return tokens
    def list_tokens(self, user_id, tenant_id=None, trust_id=None,
                    consumer_id=None):
        """Dispatch to the trust / consumer / plain-user listing helper.
        trust_id takes precedence over consumer_id."""
        if trust_id:
            return self._list_tokens_for_trust(trust_id)
        if consumer_id:
            return self._list_tokens_for_consumer(user_id, consumer_id)
        else:
            return self._list_tokens_for_user(user_id, tenant_id)
    def list_revoked_tokens(self):
        """Return [{'id', 'expires'}] for revoked (valid=False) tokens that
        have not yet expired."""
        session = self.get_session()
        tokens = []
        now = timeutils.utcnow()
        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires > now)
        token_references = query.filter_by(valid=False)
        for token_ref in token_references:
            record = {
                'id': token_ref['id'],
                'expires': token_ref['expires'],
            }
            tokens.append(record)
        return tokens
    def flush_expired_tokens(self):
        """Hard-delete all rows whose expiry is in the past."""
        session = self.get_session()

        query = session.query(TokenModel)
        query = query.filter(TokenModel.expires < timeutils.utcnow())
        # synchronize_session=False: bulk delete without reconciling any
        # in-memory objects — cheaper, and we use none afterwards.
        query.delete(synchronize_session=False)

        session.flush()
|
apache-2.0
|
joopert/home-assistant
|
homeassistant/components/feedreader/__init__.py
|
2
|
8552
|
"""Support for RSS/Atom feeds."""
from datetime import datetime, timedelta
from logging import getLogger
from os.path import exists
from threading import Lock
import pickle
import voluptuous as vol
import feedparser
from homeassistant.const import EVENT_HOMEASSISTANT_START, CONF_SCAN_INTERVAL
from homeassistant.helpers.event import track_time_interval
import homeassistant.helpers.config_validation as cv
_LOGGER = getLogger(__name__)
CONF_URLS = "urls"
CONF_MAX_ENTRIES = "max_entries"
DEFAULT_MAX_ENTRIES = 20
DEFAULT_SCAN_INTERVAL = timedelta(hours=1)
DOMAIN = "feedreader"
EVENT_FEEDREADER = "feedreader"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
vol.Required(CONF_URLS): vol.All(cv.ensure_list, [cv.url]),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
vol.Optional(
CONF_MAX_ENTRIES, default=DEFAULT_MAX_ENTRIES
): cv.positive_int,
}
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the Feedreader component."""
    domain_config = config.get(DOMAIN)
    urls = domain_config[CONF_URLS]
    scan_interval = domain_config.get(CONF_SCAN_INTERVAL)
    max_entries = domain_config.get(CONF_MAX_ENTRIES)
    # One shared pickle store holds the last-seen timestamp for every feed.
    storage = StoredData(hass.config.path(f"{DOMAIN}.pickle"))
    feeds = [
        FeedManager(url, scan_interval, max_entries, hass, storage)
        for url in urls
    ]
    return len(feeds) > 0
class FeedManager:
    """Abstraction over Feedparser module.

    Polls one RSS/Atom feed on a fixed interval and fires one
    EVENT_FEEDREADER bus event per new entry.
    """
    def __init__(self, url, scan_interval, max_entries, hass, storage):
        """Initialize the FeedManager object, poll as per scan interval."""
        self._url = url
        self._scan_interval = scan_interval  # timedelta between polls
        self._max_entries = max_entries  # cap on entries handled per poll
        self._feed = None  # last feedparser result; carries etag/modified
        self._hass = hass
        self._firstrun = True  # first poll publishes every entry
        self._storage = storage  # StoredData persisting the last timestamp
        self._last_entry_timestamp = None
        self._last_update_successful = False
        self._has_published_parsed = False  # feed exposes published_parsed?
        self._event_type = EVENT_FEEDREADER
        self._feed_id = url
        # Delay the first poll until Home Assistant has fully started.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_START, lambda _: self._update())
        self._init_regular_updates(hass)
    def _log_no_entries(self):
        """Send no entries log at debug level."""
        _LOGGER.debug("No new entries to be published in feed %s", self._url)
    def _init_regular_updates(self, hass):
        """Schedule regular updates at the top of the clock."""
        track_time_interval(hass, lambda now: self._update(), self._scan_interval)
    @property
    def last_update_successful(self):
        """Return True if the last feed update was successful."""
        return self._last_update_successful
    def _update(self):
        """Update the feed and publish new entries to the event bus."""
        _LOGGER.info("Fetching new data from feed %s", self._url)
        # Passing etag/modified of the previous fetch lets the server reply
        # "not modified" with an empty entries list.
        self._feed = feedparser.parse(
            self._url,
            etag=None if not self._feed else self._feed.get("etag"),
            modified=None if not self._feed else self._feed.get("modified"),
        )
        if not self._feed:
            _LOGGER.error("Error fetching feed data from %s", self._url)
            self._last_update_successful = False
        else:
            # The 'bozo' flag really only indicates that there was an issue
            # during the initial parsing of the XML, but it doesn't indicate
            # whether this is an unrecoverable error. In this case the
            # feedparser lib is trying a less strict parsing approach.
            # If an error is detected here, log error message but continue
            # processing the feed entries if present.
            if self._feed.bozo != 0:
                _LOGGER.error(
                    "Error parsing feed %s: %s", self._url, self._feed.bozo_exception
                )
            # Using etag and modified, if there's no new data available,
            # the entries list will be empty
            if self._feed.entries:
                _LOGGER.debug(
                    "%s entri(es) available in feed %s",
                    len(self._feed.entries),
                    self._url,
                )
                self._filter_entries()
                self._publish_new_entries()
                # Persist the high-water mark only when the feed actually
                # provides publish dates; otherwise it is meaningless.
                if self._has_published_parsed:
                    self._storage.put_timestamp(
                        self._feed_id, self._last_entry_timestamp
                    )
            else:
                self._log_no_entries()
            self._last_update_successful = True
        _LOGGER.info("Fetch from feed %s completed", self._url)
    def _filter_entries(self):
        """Filter the entries provided and return the ones to keep."""
        if len(self._feed.entries) > self._max_entries:
            _LOGGER.debug(
                "Processing only the first %s entries " "in feed %s",
                self._max_entries,
                self._url,
            )
            self._feed.entries = self._feed.entries[0 : self._max_entries]
    def _update_and_fire_entry(self, entry):
        """Update last_entry_timestamp and fire entry."""
        # Check if the entry has a published date.
        if "published_parsed" in entry.keys() and entry.published_parsed:
            # We are lucky, `published_parsed` data available, let's make use of
            # it to publish only new available entries since the last run
            self._has_published_parsed = True
            self._last_entry_timestamp = max(
                entry.published_parsed, self._last_entry_timestamp
            )
        else:
            self._has_published_parsed = False
            _LOGGER.debug("No published_parsed info available for entry %s", entry)
        entry.update({"feed_url": self._url})
        self._hass.bus.fire(self._event_type, entry)
    def _publish_new_entries(self):
        """Publish new entries to the event bus."""
        new_entries = False
        self._last_entry_timestamp = self._storage.get_timestamp(self._feed_id)
        if self._last_entry_timestamp:
            self._firstrun = False
        else:
            # Set last entry timestamp as epoch time if not available
            self._last_entry_timestamp = datetime.utcfromtimestamp(0).timetuple()
        for entry in self._feed.entries:
            # On the first ever run everything is fired; afterwards only
            # entries newer than the stored high-water mark.
            if self._firstrun or (
                "published_parsed" in entry.keys()
                and entry.published_parsed > self._last_entry_timestamp
            ):
                self._update_and_fire_entry(entry)
                new_entries = True
            else:
                _LOGGER.debug("Entry %s already processed", entry)
        if not new_entries:
            self._log_no_entries()
        self._firstrun = False
class StoredData:
    """Pickle-backed persistence for per-feed "latest entry" timestamps.

    The whole mapping {feed_id: timestamp} is kept in memory and re-read
    from disk lazily whenever the cache has been invalidated by a write.
    """

    def __init__(self, data_file):
        """Initialize pickle data storage.

        data_file: path of the pickle file; it need not exist yet.
        """
        self._data_file = data_file
        self._lock = Lock()  # guards file access across threads
        self._cache_outdated = True
        self._data = {}
        # Own logger instead of the module-level one, so the class does not
        # depend on module globals.
        self._logger = getLogger(__name__)
        self._fetch_data()

    def _fetch_data(self):
        """(Re)load the pickle file into the in-memory cache if stale."""
        if self._cache_outdated and exists(self._data_file):
            try:
                self._logger.debug("Fetching data from file %s", self._data_file)
                with self._lock, open(self._data_file, "rb") as myfile:
                    self._data = pickle.load(myfile) or {}
                self._cache_outdated = False
            # Was a bare `except:`; `Exception` keeps the best-effort
            # behavior without swallowing SystemExit/KeyboardInterrupt.
            except Exception:
                self._logger.error(
                    "Error loading data from pickled file %s", self._data_file
                )

    def get_timestamp(self, feed_id):
        """Return stored timestamp for given feed id (usually the url)."""
        self._fetch_data()
        return self._data.get(feed_id)

    def put_timestamp(self, feed_id, timestamp):
        """Update timestamp for given feed id (usually the url)."""
        self._fetch_data()
        with self._lock, open(self._data_file, "wb") as myfile:
            self._data.update({feed_id: timestamp})
            self._logger.debug(
                "Overwriting feed %s timestamp in storage file %s",
                feed_id,
                self._data_file,
            )
            try:
                pickle.dump(self._data, myfile)
            except Exception:  # was bare except; see _fetch_data
                self._logger.error("Error saving pickled data to %s", self._data_file)
            # Force the next read to go back to disk.
            self._cache_outdated = True
|
apache-2.0
|
bq/web2board
|
res/common/Scons/sconsFiles/SCons/Tool/docbook/__init__.py
|
7
|
30087
|
"""SCons.Tool.docbook
Tool-specific initialization for Docbook.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001-7,2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import glob
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Script
import SCons.Tool
import SCons.Util
# Get full path to this script
scriptpath = os.path.dirname(os.path.realpath(__file__))
# Local folder for the collection of DocBook XSLs
db_xsl_folder = 'docbook-xsl-1.76.1'
# Do we have libxml2/libxslt/lxml?
# Feature detection: the bare excepts are deliberate here — any import
# failure simply marks the binding as unavailable.
has_libxml2 = True
has_lxml = True
try:
    import libxml2
    import libxslt
except:
    has_libxml2 = False
try:
    import lxml
except:
    has_lxml = False
# Set this to True, to prefer xsltproc over libxml2 and lxml
prefer_xsltproc = False
# Regexs for parsing Docbook XML sources of MAN pages
re_manvolnum = re.compile("<manvolnum>([^<]*)</manvolnum>")
re_refname = re.compile("<refname>([^<]*)</refname>")
#
# Helper functions
#
def __extend_targets_sources(target, source):
    """Coerce target/source into lists and pad target up to len(source)."""
    targets = target if SCons.Util.is_List(target) else [target]
    if not source:
        # No sources given: each target is built from a file of the same name.
        sources = targets[:]
    else:
        sources = source if SCons.Util.is_List(source) else [source]
    # Extending by an empty slice is a no-op when targets is already long enough.
    targets.extend(sources[len(targets):])
    return targets, sources
def __init_xsl_stylesheet(kw, env, user_xsl_var, default_path):
    """Fill kw['DOCBOOK_XSL'] from kw['xsl'], the env variable, or the
    stylesheet bundled with this tool (in that order of precedence)."""
    if kw.get('DOCBOOK_XSL', '') != '':
        return
    style = kw.get('xsl', env.subst(user_xsl_var))
    if style == '':
        style = os.path.join(scriptpath, db_xsl_folder, *default_path)
    kw['DOCBOOK_XSL'] = style
def __select_builder(lxml_builder, libxml2_builder, cmdline_builder):
    """Pick a builder based on available XML modules / user preference."""
    if prefer_xsltproc:
        return cmdline_builder
    if has_libxml2:
        # libxml2 is preferred over lxml; the two can conflict when
        # installed side by side.
        return libxml2_builder
    return lxml_builder if has_lxml else cmdline_builder
def __ensure_suffix(t, suffix):
    """Return t unchanged if its string form ends in *suffix*, else the
    string form with *suffix* appended."""
    name = str(t)
    return t if name.endswith(suffix) else name + suffix
def __ensure_suffix_stem(t, suffix):
    """Return (target-with-suffix, stem) for the given target t."""
    name = str(t)
    if name.endswith(suffix):
        # Already suffixed: keep the original object, strip the extension
        # for the stem.
        return t, os.path.splitext(name)[0]
    return name + suffix, name
def __get_xml_text(root):
    """Concatenate the text of all direct TEXT_NODE children of *root*
    (an xml.dom.minidom node)."""
    return "".join(child.data for child in root.childNodes
                   if child.nodeType == child.TEXT_NODE)
def __create_output_dir(base_dir):
    """Ensure that the output directory for *base_dir* exists.

    A trailing '/' means base_dir itself is the directory to create;
    otherwise the last path component is a file name and only its parent
    directory is created. An empty/parent-less path creates nothing.
    """
    # Rewritten from a four-branch if/else that also shadowed the builtin
    # `dir`; behavior is identical (os.path.dirname == os.path.split()[0]).
    out_dir = base_dir if base_dir.endswith('/') else os.path.dirname(base_dir)
    if out_dir and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
#
# Supported command line tools and their call "signature"
#
# Keys are executable names probed with env.WhereIs(); values are the
# SCons command-line templates used when that executable is found.
xsltproc_com = {'xsltproc' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE',
                'saxon' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE $DOCBOOK_XSLTPROCPARAMS',
                'saxon-xslt' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -o $TARGET $DOCBOOK_XSL $SOURCE $DOCBOOK_XSLTPROCPARAMS',
                'xalan' : '$DOCBOOK_XSLTPROC $DOCBOOK_XSLTPROCFLAGS -q -out $TARGET -xsl $DOCBOOK_XSL -in $SOURCE'}
xmllint_com = {'xmllint' : '$DOCBOOK_XMLLINT $DOCBOOK_XMLLINTFLAGS --xinclude $SOURCE > $TARGET'}
fop_com = {'fop' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -fo $SOURCE -pdf $TARGET',
           'xep' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -valid -fo $SOURCE -pdf $TARGET',
           'jw' : '$DOCBOOK_FOP $DOCBOOK_FOPFLAGS -f docbook -b pdf $SOURCE -o $TARGET'}
def __detect_cl_tool(env, chainkey, cdict):
    """
    Helper function, picks a command line tool from the list
    and initializes its environment variables.
    """
    if env.get(chainkey,'') == '':
        clpath = ''
        # NOTE(review): there is no `break` here, so when several tools from
        # cdict are installed the LAST one found wins — confirm this is
        # intended.
        for cltool in cdict:
            clpath = env.WhereIs(cltool)
            if clpath:
                env[chainkey] = clpath
                # NOTE(review): generate() pre-sets <chainkey>COM to a
                # non-empty default, so this branch appears unreachable in
                # practice and tool-specific command lines (saxon/xalan/...)
                # are never applied — verify against upstream SCons.
                if not env[chainkey + 'COM']:
                    env[chainkey + 'COM'] = cdict[cltool]
def _detect(env):
    """
    Detect all the command line tools that we might need for creating
    the requested output formats.
    """
    global prefer_xsltproc
    if env.get('DOCBOOK_PREFER_XSLTPROC', ''):
        prefer_xsltproc = True
    no_python_bindings = not (has_libxml2 or has_lxml)
    if no_python_bindings or prefer_xsltproc:
        # Fall back to (or prefer) the external command line processors.
        __detect_cl_tool(env, 'DOCBOOK_XSLTPROC', xsltproc_com)
        __detect_cl_tool(env, 'DOCBOOK_XMLLINT', xmllint_com)
        __detect_cl_tool(env, 'DOCBOOK_FOP', fop_com)
#
# Scanners
#
# `fileref=` attributes (images) and SYSTEM entity declarations are the two
# dependency patterns matched textually when no XSLT-based scan is possible.
include_re = re.compile('fileref\\s*=\\s*["|\']([^\\n]*)["|\']')
sentity_re = re.compile('<!ENTITY\\s+%*\\s*[^\\s]+\\s+SYSTEM\\s+["|\']([^\\n]*)["|\']>')
def __xml_scan(node, env, path, arg):
    """ Simple XML file scanner, detecting local images and XIncludes as implicit dependencies. """
    # Does the node exist yet?
    if not os.path.isfile(str(node)):
        return []
    if env.get('DOCBOOK_SCANENT',''):
        # Use simple pattern matching for system entities..., no support
        # for recursion yet.
        contents = node.get_text_contents()
        return sentity_re.findall(contents)
    # Preferred path: run the bundled xmldepend.xsl stylesheet over the
    # source to extract its dependencies.
    xsl_file = os.path.join(scriptpath,'utils','xmldepend.xsl')
    if not has_libxml2 or prefer_xsltproc:
        if has_lxml and not prefer_xsltproc:
            from lxml import etree
            xsl_tree = etree.parse(xsl_file)
            doc = etree.parse(str(node))
            result = doc.xslt(xsl_tree)
            depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")]
            return depfiles
        else:
            # Try to call xsltproc
            xsltproc = env.subst("$DOCBOOK_XSLTPROC")
            if xsltproc and xsltproc.endswith('xsltproc'):
                result = env.backtick(' '.join([xsltproc, xsl_file, str(node)]))
                depfiles = [x.strip() for x in str(result).splitlines() if x.strip() != "" and not x.startswith("<?xml ")]
                return depfiles
            else:
                # Use simple pattern matching, there is currently no support
                # for xi:includes...
                contents = node.get_text_contents()
                return include_re.findall(contents)
    # Default path: libxml2/libxslt bindings are available.
    styledoc = libxml2.parseFile(xsl_file)
    style = libxslt.parseStylesheetDoc(styledoc)
    doc = libxml2.readFile(str(node), None, libxml2.XML_PARSE_NOENT)
    result = style.applyStylesheet(doc, None)
    depfiles = []
    for x in str(result).splitlines():
        if x.strip() != "" and not x.startswith("<?xml "):
            depfiles.extend(x.strip().split())
    # libxml2 objects must be freed explicitly.
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()
    return depfiles
# Creating the instance of our XML dependency scanner
docbook_xml_scanner = SCons.Script.Scanner(function = __xml_scan,
                                           argument = None)
#
# Action generators
#
def __generate_xsltproc_action(source, target, env, for_signature):
    """Emit the xsltproc command line; when a base_dir is set (chunked
    output) the bare file name replaces the full target path."""
    cmd = env['DOCBOOK_XSLTPROCCOM']
    if env.subst('$base_dir'):
        cmd = cmd.replace('$TARGET', '${TARGET.file}')
    return cmd
#
# Emitters
#
def __emit_xsl_basedir(target, source, env):
    """Emitter: prefix every target with $base_dir when one is set."""
    base_dir = env.subst('$base_dir')
    if not base_dir:
        # No base dir: pass target and source names through untouched.
        return target, source
    return [os.path.join(base_dir, str(t)) for t in target], source
#
# Builders
#
def __build_libxml2(target, source, env):
    """
    General XSLT builder (HTML/FO), using the libxml2 module.
    """
    xsl_style = env.subst('$DOCBOOK_XSL')
    styledoc = libxml2.parseFile(xsl_style)
    style = libxslt.parseStylesheetDoc(styledoc)
    # XML_PARSE_NOENT resolves entities while reading the source document.
    doc = libxml2.readFile(str(source[0]),None,libxml2.XML_PARSE_NOENT)
    # Support for additional parameters
    # NOTE(review): parampass is always empty here, so the else branch is
    # the only one ever taken; the dict looks like a stub for future
    # parameter support.
    parampass = {}
    if parampass:
        result = style.applyStylesheet(doc, parampass)
    else:
        result = style.applyStylesheet(doc, None)
    style.saveResultToFilename(str(target[0]), result, 0)
    # libxml2 objects must be freed explicitly to avoid leaks.
    style.freeStylesheet()
    doc.freeDoc()
    result.freeDoc()
    return None
def __build_lxml(target, source, env):
    """
    General XSLT builder (HTML/FO), using the lxml module.
    """
    from lxml import etree
    # Allow file access but forbid any network access during the transform.
    xslt_ac = etree.XSLTAccessControl(read_file=True,
                                      write_file=True,
                                      create_dir=True,
                                      read_network=False,
                                      write_network=False)
    xsl_style = env.subst('$DOCBOOK_XSL')
    xsl_tree = etree.parse(xsl_style)
    transform = etree.XSLT(xsl_tree, access_control=xslt_ac)
    doc = etree.parse(str(source[0]))
    # Support for additional parameters (stub, kept for parity with the
    # libxml2 builder; currently always empty).
    parampass = {}
    if parampass:
        result = transform(doc, **parampass)
    else:
        result = transform(doc)
    try:
        # BUG FIX: the original called of.write(of.write(...)), feeding the
        # return value of write() back into write(); the resulting
        # TypeError was swallowed by a bare except, so the output file was
        # written twice/corrupted silently. Write the serialized tree once.
        with open(str(target[0]), "w") as out:
            out.write(etree.tostring(result, pretty_print=True))
    except Exception:
        # Preserve the original best-effort behavior of ignoring I/O errors,
        # but no longer swallow SystemExit/KeyboardInterrupt.
        pass
    return None
def __xinclude_libxml2(target, source, env):
    """
    Resolving XIncludes, using the libxml2 module.
    """
    # Read with entity substitution, expand xi:include elements in place,
    # then write the flattened document to the target.
    doc = libxml2.readFile(str(source[0]), None, libxml2.XML_PARSE_NOENT)
    doc.xincludeProcessFlags(libxml2.XML_PARSE_NOENT)
    doc.saveFile(str(target[0]))
    doc.freeDoc()
    return None
def __xinclude_lxml(target, source, env):
    """
    Resolving XIncludes, using the lxml module.
    """
    from lxml import etree
    doc = etree.parse(str(source[0]))
    doc.xinclude()
    try:
        doc.write(str(target[0]), xml_declaration=True,
                  encoding="UTF-8", pretty_print=True)
    # NOTE(review): the bare except silently drops any write failure,
    # leaving a missing/partial target — presumably intentional best-effort,
    # but worth confirming.
    except:
        pass
    return None
# Plain XSLT transform via the libxml2 Python bindings.
__libxml2_builder = SCons.Builder.Builder(
        action = __build_libxml2,
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner,
        emitter = __emit_xsl_basedir)
# Plain XSLT transform via the lxml Python bindings.
__lxml_builder = SCons.Builder.Builder(
        action = __build_lxml,
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner,
        emitter = __emit_xsl_basedir)
# XInclude flattening (libxml2 / lxml variants).
__xinclude_libxml2_builder = SCons.Builder.Builder(
        action = __xinclude_libxml2,
        suffix = '.xml',
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner)
__xinclude_lxml_builder = SCons.Builder.Builder(
        action = __xinclude_lxml,
        suffix = '.xml',
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner)
# Command line fallbacks: xsltproc-style XSLT, xmllint XInclude, and
# fop/xep/jw FO-to-PDF rendering.
__xsltproc_builder = SCons.Builder.Builder(
        action = SCons.Action.CommandGeneratorAction(__generate_xsltproc_action,
                             {'cmdstr' : '$DOCBOOK_XSLTPROCCOMSTR'}),
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner,
        emitter = __emit_xsl_basedir)
__xmllint_builder = SCons.Builder.Builder(
        action = SCons.Action.Action('$DOCBOOK_XMLLINTCOM','$DOCBOOK_XMLLINTCOMSTR'),
        suffix = '.xml',
        src_suffix = '.xml',
        source_scanner = docbook_xml_scanner)
__fop_builder = SCons.Builder.Builder(
        action = SCons.Action.Action('$DOCBOOK_FOPCOM','$DOCBOOK_FOPCOMSTR'),
        suffix = '.pdf',
        src_suffix = '.fo',
        ensure_suffix=1)
def DocbookEpub(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for ePub output.
    """
    import zipfile
    import shutil
    def build_open_container(target, source, env):
        """Generate the *.epub file from intermediate outputs
        Constructs the epub file according to the Open Container Format. This
        function could be replaced by a call to the SCons Zip builder if support
        was added for different compression formats for separate source nodes.
        """
        zf = zipfile.ZipFile(str(target[0]), 'w')
        # The OCF spec requires a 'mimetype' file stored uncompressed.
        mime_file = open('mimetype', 'w')
        mime_file.write('application/epub+zip')
        mime_file.close()
        zf.write(mime_file.name, compress_type = zipfile.ZIP_STORED)
        for s in source:
            if os.path.isfile(str(s)):
                head, tail = os.path.split(str(s))
                if not head:
                    continue
                s = head
            for dirpath, dirnames, filenames in os.walk(str(s)):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if os.path.isfile(path):
                        zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))),
                            zipfile.ZIP_DEFLATED)
        zf.close()
    def add_resources(target, source, env):
        """Add missing resources to the OEBPS directory
        Ensure all the resources in the manifest are present in the OEBPS directory.
        """
        hrefs = []
        content_file = os.path.join(source[0].get_abspath(), 'content.opf')
        if not os.path.isfile(content_file):
            return
        hrefs = []
        if has_libxml2:
            nsmap = {'opf' : 'http://www.idpf.org/2007/opf'}
            # Read file and resolve entities
            doc = libxml2.readFile(content_file, None, 0)
            opf = doc.getRootElement()
            # Create xpath context
            xpath_context = doc.xpathNewContext()
            # Register namespaces
            # NOTE(review): dict.iteritems() is Python 2 only — this branch
            # would raise AttributeError on Python 3.
            for key, val in nsmap.iteritems():
                xpath_context.xpathRegisterNs(key, val)
            if hasattr(opf, 'xpathEval') and xpath_context:
                # Use the xpath context
                xpath_context.setContextNode(opf)
                items = xpath_context.xpathEval(".//opf:item")
            else:
                items = opf.findall(".//{'http://www.idpf.org/2007/opf'}item")
            for item in items:
                if hasattr(item, 'prop'):
                    hrefs.append(item.prop('href'))
                else:
                    hrefs.append(item.attrib['href'])
            doc.freeDoc()
            xpath_context.xpathFreeContext()
        elif has_lxml:
            from lxml import etree
            opf = etree.parse(content_file)
            # All the opf:item elements are resources
            for item in opf.xpath('//opf:item',
                    namespaces= { 'opf': 'http://www.idpf.org/2007/opf' }):
                hrefs.append(item.attrib['href'])
        for href in hrefs:
            # If the resource was not already created by DocBook XSL itself,
            # copy it into the OEBPS folder
            referenced_file = os.path.join(source[0].get_abspath(), href)
            if not os.path.exists(referenced_file):
                shutil.copy(href, os.path.join(source[0].get_abspath(), href))
    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_EPUB', ['epub','docbook.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Create targets
    result = []
    if not env.GetOption('clean'):
        # Ensure that the folders OEBPS and META-INF exist
        __create_output_dir('OEBPS/')
        __create_output_dir('META-INF/')
        dirs = env.Dir(['OEBPS', 'META-INF'])
        # Set the fixed base_dir
        kw['base_dir'] = 'OEBPS/'
        tocncx = __builder.__call__(env, 'toc.ncx', source[0], **kw)
        cxml = env.File('META-INF/container.xml')
        env.SideEffect(cxml, tocncx)
        env.Depends(tocncx, kw['DOCBOOK_XSL'])
        result.extend(tocncx+[cxml])
        container = env.Command(__ensure_suffix(str(target[0]), '.epub'),
            tocncx+[cxml], [add_resources, build_open_container])
        mimetype = env.File('mimetype')
        env.SideEffect(mimetype, container)
        result.extend(container)
        # Add supporting files for cleanup
        env.Clean(tocncx, dirs)
    return result
def DocbookHtml(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTML output.
    """
    # Normalize targets/sources and pick stylesheet and builder.
    target, source = __extend_targets_sources(target, source)
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTML', ['html', 'docbook.xsl'])
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    result = []
    for tgt, src in zip(target, source):
        nodes = builder.__call__(env, __ensure_suffix(tgt, '.html'), src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)
    return result
def DocbookHtmlChunked(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for chunked HTML output.
    """
    # Init target/source
    # Only the first source is transformed; when no explicit source is
    # given, the single target is reused as the source and the target
    # defaults to 'index.html'.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target
        target = ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTMLCHUNKED', ['html','chunkfast.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Detect base dir
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Create targets
    result = []
    r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(r, kw['DOCBOOK_XSL'])
    result.extend(r)
    # Add supporting files for cleanup
    # The chunking stylesheet emits extra *.html files next to the main
    # target; register them so `scons -c` removes them too.
    env.Clean(r, glob.glob(os.path.join(base_dir, '*.html')))
    return result
def DocbookHtmlhelp(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTMLHELP output.
    """
    # Init target/source
    # Same single-source convention as DocbookHtmlChunked: with no source,
    # the target becomes the source and output defaults to 'index.html'.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target
        target = ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTMLHELP', ['htmlhelp','htmlhelp.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Detect base dir
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Create targets
    result = []
    r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(r, kw['DOCBOOK_XSL'])
    result.extend(r)
    # Add supporting files for cleanup
    # The htmlhelp stylesheet additionally writes project/index/toc files
    # and per-chapter pages.
    env.Clean(r, ['toc.hhc', 'htmlhelp.hhp', 'index.hhk'] +
        glob.glob(os.path.join(base_dir, '[ar|bk|ch]*.html')))
    return result
def DocbookPdf(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for PDF output
    (XML -> XSL-FO -> PDF via fop/xep/jw).
    """
    target, source = __extend_targets_sources(target, source)
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_PDF', ['fo', 'docbook.xsl'])
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    result = []
    for tgt, src in zip(target, source):
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        fo_nodes = builder.__call__(env, stem + '.fo', src, **kw)
        result.extend(fo_nodes)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(__fop_builder.__call__(env, tgt, fo_nodes, **kw))
    return result
def DocbookMan(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for Man page output.
    """
    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_MAN', ['manpages','docbook.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Create targets
    result = []
    for t,s in zip(target,source):
        volnum = "1"
        outfiles = []
        srcfile = __ensure_suffix(str(s),'.xml')
        if os.path.isfile(srcfile):
            try:
                # Preferred: parse the source with minidom to read the man
                # volume number and output page names from the refmeta /
                # refnamediv elements.
                import xml.dom.minidom
                dom = xml.dom.minidom.parse(__ensure_suffix(str(s),'.xml'))
                # Extract volume number, default is 1
                for node in dom.getElementsByTagName('refmeta'):
                    for vol in node.getElementsByTagName('manvolnum'):
                        volnum = __get_xml_text(vol)
                # Extract output filenames
                for node in dom.getElementsByTagName('refnamediv'):
                    for ref in node.getElementsByTagName('refname'):
                        outfiles.append(__get_xml_text(ref)+'.'+volnum)
            # NOTE(review): bare except — any parse error (including
            # malformed XML) silently falls back to regex scanning.
            except:
                # Use simple regex parsing
                f = open(__ensure_suffix(str(s),'.xml'), 'r')
                content = f.read()
                f.close()
                for m in re_manvolnum.finditer(content):
                    volnum = m.group(1)
                for m in re_refname.finditer(content):
                    outfiles.append(m.group(1)+'.'+volnum)
            if not outfiles:
                # Use stem of the source file
                spath = str(s)
                if not spath.endswith('.xml'):
                    outfiles.append(spath+'.'+volnum)
                else:
                    stem, ext = os.path.splitext(spath)
                    outfiles.append(stem+'.'+volnum)
        else:
            # We have to completely rely on the given target name
            outfiles.append(t)
        # Only the first page is the actual build target; any further pages
        # produced by the stylesheet are registered for cleanup only.
        __builder.__call__(env, outfiles[0], s, **kw)
        env.Depends(outfiles[0], kw['DOCBOOK_XSL'])
        result.append(outfiles[0])
        if len(outfiles) > 1:
            env.Clean(outfiles[0], outfiles[1:])
    return result
def DocbookSlidesPdf(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for PDF slides output
    (XML -> XSL-FO -> PDF).
    """
    target, source = __extend_targets_sources(target, source)
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESPDF', ['slides', 'fo', 'plain.xsl'])
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    result = []
    for tgt, src in zip(target, source):
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        fo_nodes = builder.__call__(env, stem + '.fo', src, **kw)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(fo_nodes)
        result.extend(__fop_builder.__call__(env, tgt, fo_nodes, **kw))
    return result
def DocbookSlidesHtml(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for HTML slides output.
    """
    # Init list of targets/sources
    # Single-source convention: without an explicit source, the target is
    # reused as the source and output defaults to 'index.html'.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target
        target = ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESHTML', ['slides','html','plain.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    # Detect base dir
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)
    # Create targets
    result = []
    r = __builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(r, kw['DOCBOOK_XSL'])
    result.extend(r)
    # Add supporting files for cleanup
    # The slides stylesheet emits a toc.html plus one foil*.html per slide.
    env.Clean(r, [os.path.join(base_dir, 'toc.html')] +
        glob.glob(os.path.join(base_dir, 'foil*.html')))
    return result
def DocbookXInclude(env, target, source, *args, **kw):
    """
    A pseudo-Builder, for resolving XIncludes in a separate processing step.
    """
    target, source = __extend_targets_sources(target, source)
    builder = __select_builder(__xinclude_lxml_builder,
                               __xinclude_libxml2_builder,
                               __xmllint_builder)
    result = []
    for tgt, src in zip(target, source):
        result.extend(builder.__call__(env, tgt, src, **kw))
    return result
def DocbookXslt(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, applying a simple XSL transformation to the input file.
    """
    target, source = __extend_targets_sources(target, source)
    # Stylesheet defaults to 'transform.xsl' unless given via kw['xsl'].
    kw['DOCBOOK_XSL'] = kw.get('xsl', 'transform.xsl')
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    result = []
    for tgt, src in zip(target, source):
        nodes = builder.__call__(env, tgt, src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)
    return result
def generate(env):
    """Add Builders and construction variables for docbook to an Environment."""
    env.SetDefault(
        # Default names for customized XSL stylesheets
        DOCBOOK_DEFAULT_XSL_EPUB = '',
        DOCBOOK_DEFAULT_XSL_HTML = '',
        DOCBOOK_DEFAULT_XSL_HTMLCHUNKED = '',
        DOCBOOK_DEFAULT_XSL_HTMLHELP = '',
        DOCBOOK_DEFAULT_XSL_PDF = '',
        DOCBOOK_DEFAULT_XSL_MAN = '',
        DOCBOOK_DEFAULT_XSL_SLIDESPDF = '',
        DOCBOOK_DEFAULT_XSL_SLIDESHTML = '',
        # Paths to the detected executables
        DOCBOOK_XSLTPROC = '',
        DOCBOOK_XMLLINT = '',
        DOCBOOK_FOP = '',
        # Additional flags for the text processors
        DOCBOOK_XSLTPROCFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_XMLLINTFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_FOPFLAGS = SCons.Util.CLVar(''),
        DOCBOOK_XSLTPROCPARAMS = SCons.Util.CLVar(''),
        # Default command lines for the detected executables
        DOCBOOK_XSLTPROCCOM = xsltproc_com['xsltproc'],
        DOCBOOK_XMLLINTCOM = xmllint_com['xmllint'],
        DOCBOOK_FOPCOM = fop_com['fop'],
        # Screen output for the text processors
        DOCBOOK_XSLTPROCCOMSTR = None,
        DOCBOOK_XMLLINTCOMSTR = None,
        DOCBOOK_FOPCOMSTR = None,
        )
    _detect(env)
    try:
        # Register all pseudo-Builders on the environment.
        env.AddMethod(DocbookEpub, "DocbookEpub")
        env.AddMethod(DocbookHtml, "DocbookHtml")
        env.AddMethod(DocbookHtmlChunked, "DocbookHtmlChunked")
        env.AddMethod(DocbookHtmlhelp, "DocbookHtmlhelp")
        env.AddMethod(DocbookPdf, "DocbookPdf")
        env.AddMethod(DocbookMan, "DocbookMan")
        env.AddMethod(DocbookSlidesPdf, "DocbookSlidesPdf")
        env.AddMethod(DocbookSlidesHtml, "DocbookSlidesHtml")
        env.AddMethod(DocbookXInclude, "DocbookXInclude")
        env.AddMethod(DocbookXslt, "DocbookXslt")
    except AttributeError:
        # Looks like we use a pre-0.98 version of SCons...
        # Fall back to patching the methods onto SConsEnvironment directly.
        from SCons.Script.SConscript import SConsEnvironment
        SConsEnvironment.DocbookEpub = DocbookEpub
        SConsEnvironment.DocbookHtml = DocbookHtml
        SConsEnvironment.DocbookHtmlChunked = DocbookHtmlChunked
        SConsEnvironment.DocbookHtmlhelp = DocbookHtmlhelp
        SConsEnvironment.DocbookPdf = DocbookPdf
        SConsEnvironment.DocbookMan = DocbookMan
        SConsEnvironment.DocbookSlidesPdf = DocbookSlidesPdf
        SConsEnvironment.DocbookSlidesHtml = DocbookSlidesHtml
        SConsEnvironment.DocbookXInclude = DocbookXInclude
        SConsEnvironment.DocbookXslt = DocbookXslt
def exists(env):
    """Tool availability hook: docbook support is always reported present."""
    return 1
|
lgpl-3.0
|
raviflipsyde/servo
|
tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/genshistream.py
|
1730
|
2278
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
    """Tree walker over a genshi event stream, translating genshi events
    into html5lib walker tokens."""
    def __iter__(self):
        # Buffer the events so we can pass in the following one
        previous = None
        for event in self.tree:
            if previous is not None:
                for token in self.tokens(previous, event):
                    yield token
            previous = event
        # Don't forget the final event!
        if previous is not None:
            for token in self.tokens(previous, None):
                yield token
    def tokens(self, event, next):
        """Yield html5lib tokens for one genshi *event*; *next* is the
        following event (or None), used to detect empty void elements."""
        kind, data, pos = event
        if kind == START:
            tag, attribs = data
            name = tag.localname
            namespace = tag.namespace
            converted_attribs = {}
            for k, v in attribs:
                if isinstance(k, QName):
                    converted_attribs[(k.namespace, k.localname)] = v
                else:
                    converted_attribs[(None, k)] = v
            if namespace == namespaces["html"] and name in voidElements:
                # A void element is "non-empty" only if the very next event
                # is not its own END tag.
                for token in self.emptyTag(namespace, name, converted_attribs,
                                           not next or next[0] != END
                                           or next[1] != tag):
                    yield token
            else:
                yield self.startTag(namespace, name, converted_attribs)
        elif kind == END:
            name = data.localname
            namespace = data.namespace
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif kind == COMMENT:
            yield self.comment(data)
        elif kind == TEXT:
            for token in self.text(data):
                yield token
        elif kind == DOCTYPE:
            yield self.doctype(*data)
        # NOTE(review): DOCTYPE in this tuple is unreachable (handled above),
        # and XML_NAMESPACE is a namespace URI, not an event kind — this
        # membership test looks partially vestigial; confirm against
        # upstream html5lib.
        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
                      START_CDATA, END_CDATA, PI):
            pass
        else:
            yield self.unknown(kind)
|
mpl-2.0
|
chrizandr/ITS_feedback
|
feedback_portal/main/forms.py
|
1
|
4400
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import *
from django.forms import CharField
from django.forms import FileField
from django.forms import FileInput
from django.forms import Form
from django.forms import PasswordInput
from django.forms import TextInput
from .models import FileUpload, RequestFeedback
class LoginForm(AuthenticationForm):
    """
    Form for doing user login
    NOT USED
    """
    # MDL-styled widgets replace the stock AuthenticationForm inputs.
    username = CharField(
        widget=TextInput(
            attrs={'class': 'mdl-textfield__input', 'id': 'username'}))
    password = CharField(
        widget=PasswordInput(
            attrs={'class': 'mdl-textfield__input', 'id': 'password'}))
    def clean(self):
        """Authenticate the credentials and reject inactive/unknown users
        with a single form-wide error."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        if not user or not user.is_active:
            raise forms.ValidationError(
                'Sorry, username or password incorrect!')
        return self.cleaned_data
class ChangePasswordForm(SetPasswordForm):
    """
    A form that lets a user change their password by entering their old
    password.
    NOT USED
    """
    # Merge the parent's messages with one for a wrong old password.
    error_messages = dict(SetPasswordForm.error_messages, **{
        'password_incorrect': ('Your old password was entered incorrectly. '
                               'Please enter it again.'),
    })
    old_password = forms.CharField(
        label=('Old password'),
        widget=forms.PasswordInput(attrs={'autofocus': ''}),
    )
    # Render the old-password field before the two new-password fields.
    field_order = ['old_password', 'new_password1', 'new_password2']
    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data['old_password']
        if not self.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'],
                code='password_incorrect',
            )
        return old_password
class NewPasswordForm(forms.Form):
    """
    A form that lets a user set their password without entering the old
    password.
    NOT USED

    ``password_validation`` is available via the star import from
    ``django.contrib.auth.forms``.
    """
    error_messages = {
        'password_mismatch': ("The two password fields didn't match."),
    }
    new_password1 = CharField(label=('New password'),
                              widget=PasswordInput(
                                  attrs={'class': 'mdl-textfield__input'}),
                              help_text=password_validation.password_validators_help_text_html())
    new_password2 = CharField(
        label=('New password confirmation'),
        widget=PasswordInput(attrs={'class': 'mdl-textfield__input'}))
    def __init__(self, user, *args, **kwargs):
        """Store the target *user*; password validation runs against it."""
        self.user = user
        # BUG FIX: the original called super(SetPasswordForm, self).__init__,
        # but this class derives from forms.Form, not SetPasswordForm, so
        # instantiating the form raised TypeError. Bind super() to this class.
        super(NewPasswordForm, self).__init__(*args, **kwargs)
    def clean_new_password2(self):
        """Ensure both entries match and satisfy the password validators."""
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
        password_validation.validate_password(password2, self.user)
        return password2
    def save(self, commit=True):
        """Set the new password on the user; persist it unless commit=False."""
        password = self.cleaned_data['new_password1']
        self.user.set_password(password)
        if commit:
            self.user.save()
        return self.user
class FileForm(forms.ModelForm):
    """Upload form exposing only the CSV file field of FileUpload."""
    class Meta:
        model = FileUpload
        fields = ['CSVFile', ]
class CourseStudentForm(FileForm):
    """FileForm plus a free-text field describing the CSV column order."""
    CourseList = forms.CharField(label="Order of Course Names in the csv")
class FeedbackRequestForm(forms.ModelForm):
    """Model form for creating a RequestFeedback entry."""
    class Meta:
        model = RequestFeedback
        fields = ['course', 'request_by', 'end_date']
class UserForm(UserCreationForm):
    """Registration form extending UserCreationForm with a required email."""

    email = forms.EmailField(required=True)

    class Meta:
        model = User
        fields = ['username', 'email']

    def save(self, commit=True):
        """Copy the validated email onto the user before (optionally) saving."""
        user = super(UserForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
        return user
|
gpl-3.0
|
ramitsurana/boto
|
boto/rds/vpcsecuritygroupmembership.py
|
177
|
3131
|
# Copyright (c) 2013 Anthony Tonns http://www.corsis.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a VPCSecurityGroupMembership
"""
class VPCSecurityGroupMembership(object):
    """VPC security group that an RDS database instance belongs to.

    Instances are either built directly (set ``vpc_group`` yourself and pass
    the object to ``create_dbinstance``/``modify``) or populated by boto's
    SAX response parser via :meth:`startElement` / :meth:`endElement`.

    Properties mirror the AWS API data type documented at
    http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\
    API_VpcSecurityGroupMembership.html

    :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
        current object
    :ivar vpc_group: This id of the VPC security group
    :ivar status: Status of the VPC security group membership
    """

    # XML tag name -> attribute name for the two documented fields.
    _FIELD_MAP = {'VpcSecurityGroupId': 'vpc_group', 'Status': 'status'}

    def __init__(self, connection=None, status=None, vpc_group=None):
        self.connection = connection
        self.status = status
        self.vpc_group = vpc_group

    def __repr__(self):
        return 'VPCSecurityGroupMembership:%s' % self.vpc_group

    def startElement(self, name, attrs, connection):
        # No nested elements to handle in this response shape.
        pass

    def endElement(self, name, value, connection):
        # Known tags map onto their attribute names; anything else is
        # stored verbatim under the tag name (boto's usual fallback).
        setattr(self, self._FIELD_MAP.get(name, name), value)
|
mit
|
mplpl/m7z
|
lib7z/googletest/test/gtest_break_on_failure_unittest.py
|
2140
|
7339
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')
# Shortcuts into the shared test-utility module.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
  # gtest_test_utils.Subprocess records how the child terminated.
  p = gtest_test_utils.Subprocess(command, env=environ)
  if p.terminated_by_signal:
    return 1
  else:
    return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Build a human-readable description of the env var for the message.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # '0' disables the mode explicitly; any other value enables it.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    has_seg_fault = Run(command)
    # Restore the environment before asserting so a failure doesn't leak
    # the variable into subsequent tests.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    # On Windows, catching exceptions could mask the intentional crash,
    # so verify the two modes don't conflict there.
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
  # Delegate to the shared gtest test runner.
  gtest_test_utils.Main()
|
lgpl-2.1
|
moonboots/tensorflow
|
tensorflow/contrib/ctc/ctc_loss_op_test.py
|
14
|
8764
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def SimpleSparseTensorFrom(x):
  """Create a very simple SparseTensor with dimensions (batch, time).

  Args:
    x: a list of lists of type int

  Returns:
    x_ix and x_val, the indices and values of the SparseTensor<2>.
  """
  x_ix = []
  x_val = []
  for batch_i, batch in enumerate(x):
    for time, val in enumerate(batch):
      x_ix.append([batch_i, time])
      x_val.append(val)
  # Dense shape: (num batch entries, length of the longest batch entry).
  x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]
  x_ix = tf.constant(x_ix, tf.int64)
  x_val = tf.constant(x_val, tf.int32)
  x_shape = tf.constant(x_shape, tf.int64)
  return tf.SparseTensor(x_ix, x_val, x_shape)
class CTCLossTest(tf.test.TestCase):
  """Checks ctc_loss values and gradients against precomputed references."""

  def _testCTCLoss(self, inputs, seq_lens, labels,
                   loss_truth, grad_truth, expected_err_re=None):
    """Runs ctc_loss on CPU and compares loss/gradient to ground truth.

    Args:
      inputs: len max_time list of (batch x depth) log-prob matrices.
      seq_lens: int32 vector of per-batch sequence lengths.
      labels: SparseTensor of target label sequences.
      loss_truth: expected per-batch negative log probabilities.
      grad_truth: expected gradient w.r.t. inputs.
      expected_err_re: if set, the op is expected to fail with this message.
    """
    self.assertEquals(len(inputs), len(grad_truth))
    inputs_t = tf.constant(inputs)
    with self.test_session(use_gpu=False) as sess:
      loss = tf.contrib.ctc.ctc_loss(inputs=inputs_t,
                                     labels=labels,
                                     sequence_length=seq_lens)
      grad = tf.gradients(loss, [inputs_t])[0]
      # Shapes must match before values are compared.
      self.assertShapeEqual(loss_truth, loss)
      self.assertShapeEqual(grad_truth, grad)
      if expected_err_re is None:
        (tf_loss, tf_grad) = sess.run([loss, grad])
        self.assertAllClose(tf_loss, loss_truth, atol=1e-6)
        self.assertAllClose(tf_grad, grad_truth, atol=1e-6)
      else:
        with self.assertRaisesOpError(expected_err_re):
          sess.run([loss, grad])

  def testBasic(self):
    """Test two batch entries."""
    # Input and ground truth from Alex Graves' implementation.
    #
    #### Batch entry 0 #####
    # targets: 0 1 2 1 0
    # outputs:
    # 0 0.633766 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
    # 1 0.111121 0.588392 0.278779 0.0055756 0.00569609 0.010436
    # 2 0.0357786 0.633813 0.321418 0.00249248 0.00272882 0.0037688
    # 3 0.0663296 0.643849 0.280111 0.00283995 0.0035545 0.00331533
    # 4 0.458235 0.396634 0.123377 0.00648837 0.00903441 0.00623107
    # alpha:
    # 0 -3.64753 -0.456075 -inf -inf -inf -inf -inf -inf -inf -inf -inf
    # 1 -inf -inf -inf -0.986437 -inf -inf -inf -inf -inf -inf -inf
    # 2 -inf -inf -inf -inf -inf -2.12145 -inf -inf -inf -inf -inf
    # 3 -inf -inf -inf -inf -inf -inf -inf -2.56174 -inf -inf -inf
    # 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf -3.34211 -inf
    # beta:
    # 0 -inf -2.88604 -inf -inf -inf -inf -inf -inf -inf -inf -inf
    # 1 -inf -inf -inf -2.35568 -inf -inf -inf -inf -inf -inf -inf
    # 2 -inf -inf -inf -inf -inf -1.22066 -inf -inf -inf -inf -inf
    # 3 -inf -inf -inf -inf -inf -inf -inf -0.780373 -inf -inf -inf
    # 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf 0 0
    # prob: -3.34211
    # outputDerivs:
    # 0 -0.366234 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
    # 1 0.111121 -0.411608 0.278779 0.0055756 0.00569609 0.010436
    # 2 0.0357786 0.633813 -0.678582 0.00249248 0.00272882 0.0037688
    # 3 0.0663296 -0.356151 0.280111 0.00283995 0.0035545 0.00331533
    # 4 -0.541765 0.396634 0.123377 0.00648837 0.00903441 0.00623107
    #
    #### Batch entry 1 #####
    #
    # targets: 0 1 1 0
    # outputs:
    # 0 0.30176 0.28562 0.0831517 0.0862751 0.0816851 0.161508
    # 1 0.24082 0.397533 0.0557226 0.0546814 0.0557528 0.19549
    # 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 0.202456
    # 3 0.280884 0.429522 0.0326593 0.0339046 0.0326856 0.190345
    # 4 0.423286 0.315517 0.0338439 0.0393744 0.0339315 0.154046
    # alpha:
    # 0 -1.8232 -1.19812 -inf -inf -inf -inf -inf -inf -inf
    # 1 -inf -2.19315 -2.83037 -2.1206 -inf -inf -inf -inf -inf
    # 2 -inf -inf -inf -2.03268 -3.71783 -inf -inf -inf -inf
    # 3 -inf -inf -inf -inf -inf -4.56292 -inf -inf -inf
    # 4 -inf -inf -inf -inf -inf -inf -inf -5.42262 -inf
    # beta:
    # 0 -inf -4.2245 -inf -inf -inf -inf -inf -inf -inf
    # 1 -inf -inf -inf -3.30202 -inf -inf -inf -inf -inf
    # 2 -inf -inf -inf -inf -1.70479 -0.856738 -inf -inf -inf
    # 3 -inf -inf -inf -inf -inf -0.859706 -0.859706 -0.549337 -inf
    # 4 -inf -inf -inf -inf -inf -inf -inf 0 0
    # prob: -5.42262
    # outputDerivs:
    # 0 -0.69824 0.28562 0.0831517 0.0862751 0.0816851 0.161508
    # 1 0.24082 -0.602467 0.0557226 0.0546814 0.0557528 0.19549
    # 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 -0.797544
    # 3 0.280884 -0.570478 0.0326593 0.0339046 0.0326856 0.190345
    # 4 -0.576714 0.315517 0.0338439 0.0393744 0.0339315 0.154046

    # max_time_steps == 7
    depth = 6

    # seq_len_0 == 5
    targets_0 = [0, 1, 2, 1, 0]
    loss_log_prob_0 = -3.34211
    # dimensions are time x depth
    input_prob_matrix_0 = np.asarray(
        [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
         [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
         [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
         [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
         [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
        dtype=np.float32)
    input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
    gradient_log_prob_0 = np.asarray(
        [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
         [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
         [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
         [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
         [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
        dtype=np.float32)

    # seq_len_1 == 5
    targets_1 = [0, 1, 1, 0]
    loss_log_prob_1 = -5.42262
    # dimensions are time x depth
    input_prob_matrix_1 = np.asarray(
        [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
         [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
         [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
         [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
         [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
        dtype=np.float32)
    input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
    gradient_log_prob_1 = np.asarray(
        [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
         [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
         [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
         [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
         [-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
        dtype=np.float32)

    # len max_time_steps array of 2 x depth matrices
    # (the last two time steps are padded with NaN to exercise masking)
    inputs = [np.vstack([input_log_prob_matrix_0[t, :],
                         input_log_prob_matrix_1[t, :]])
              for t in range(5)] + 2 * [np.nan*np.ones((2, depth), np.float32)]

    # convert inputs into [max_time x batch_size x depth tensor] Tensor
    inputs = np.asarray(inputs, dtype=np.float32)

    # len batch_size array of label vectors
    labels = SimpleSparseTensorFrom([targets_0, targets_1])

    # batch_size length vector of sequence_lengths
    seq_lens = np.array([5, 5], dtype=np.int32)

    # output: batch_size length vector of negative log probabilities
    loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)

    # output: len max_time_steps array of 2 x depth matrices
    grad_truth = [np.vstack([gradient_log_prob_0[t, :],
                             gradient_log_prob_1[t, :]])
                  for t in range(5)] + 2 * [np.zeros((2, depth), np.float32)]

    # convert grad_truth into [max_time x batch_size x depth] Tensor
    grad_truth = np.asarray(grad_truth, dtype=np.float32)

    self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
if __name__ == "__main__":
  # Run all TestCase classes in this module under the TF test runner.
  tf.test.main()
|
apache-2.0
|
ahmetdaglarbas/e-commerce
|
oscar/apps/offer/south_migrations/0001_initial.py
|
17
|
19312
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # These initial migrations must run first: the M2M join tables created
    # below hold foreign keys into catalogue and order models.
    depends_on = (
        ('catalogue', '0001_initial'),
        ('order', '0001_initial'),
    )
    def forwards(self, orm):
        """Create the four offer tables and the Range M2M join tables."""
        # Adding model 'ConditionalOffer'
        db.create_table('offer_conditionaloffer', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('offer_type', self.gf('django.db.models.fields.CharField')(default='Site', max_length=128)),
            ('condition', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.Condition'])),
            ('benefit', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.Benefit'])),
            ('start_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('priority', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('total_discount', self.gf('django.db.models.fields.DecimalField')(default='0.00', max_digits=12, decimal_places=2)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('redirect_url', self.gf('oscar.models.fields.ExtendedURLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('offer', ['ConditionalOffer'])

        # Adding model 'Condition'
        db.create_table('offer_condition', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('range', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.Range'])),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('value', self.gf('oscar.models.fields.PositiveDecimalField')(max_digits=12, decimal_places=2)),
        ))
        db.send_create_signal('offer', ['Condition'])

        # Adding model 'Benefit'
        db.create_table('offer_benefit', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('range', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.Range'], null=True, blank=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('value', self.gf('oscar.models.fields.PositiveDecimalField')(null=True, max_digits=12, decimal_places=2, blank=True)),
            ('max_affected_items', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal('offer', ['Benefit'])

        # Adding model 'Range'
        db.create_table('offer_range', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
            ('includes_all_products', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('offer', ['Range'])

        # Adding M2M table for field included_products on 'Range'
        db.create_table('offer_range_included_products', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('range', models.ForeignKey(orm['offer.range'], null=False)),
            ('product', models.ForeignKey(orm['catalogue.product'], null=False))
        ))
        db.create_unique('offer_range_included_products', ['range_id', 'product_id'])

        # Adding M2M table for field excluded_products on 'Range'
        db.create_table('offer_range_excluded_products', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('range', models.ForeignKey(orm['offer.range'], null=False)),
            ('product', models.ForeignKey(orm['catalogue.product'], null=False))
        ))
        db.create_unique('offer_range_excluded_products', ['range_id', 'product_id'])

        # Adding M2M table for field classes on 'Range'
        db.create_table('offer_range_classes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('range', models.ForeignKey(orm['offer.range'], null=False)),
            ('productclass', models.ForeignKey(orm['catalogue.productclass'], null=False))
        ))
        db.create_unique('offer_range_classes', ['range_id', 'productclass_id'])

        # Adding M2M table for field included_categories on 'Range'
        db.create_table('offer_range_included_categories', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('range', models.ForeignKey(orm['offer.range'], null=False)),
            ('category', models.ForeignKey(orm['catalogue.category'], null=False))
        ))
        db.create_unique('offer_range_included_categories', ['range_id', 'category_id'])
def backwards(self, orm):
# Deleting model 'ConditionalOffer'
db.delete_table('offer_conditionaloffer')
# Deleting model 'Condition'
db.delete_table('offer_condition')
# Deleting model 'Benefit'
db.delete_table('offer_benefit')
# Deleting model 'Range'
db.delete_table('offer_range')
# Removing M2M table for field included_products on 'Range'
db.delete_table('offer_range_included_products')
# Removing M2M table for field excluded_products on 'Range'
db.delete_table('offer_range_excluded_products')
# Removing M2M table for field classes on 'Range'
db.delete_table('offer_range_classes')
# Removing M2M table for field included_categories on 'Range'
db.delete_table('offer_range_included_categories')
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['offer']
|
bsd-3-clause
|
liangazhou/django-rdp
|
packages/Django-1.8.6/django/middleware/clickjacking.py
|
36
|
1995
|
"""
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
    """
    Middleware that sets the X-Frame-Options HTTP header in HTTP responses,
    protecting against clickjacking (a malicious site loading your pages in
    a hidden frame).

    The header is left alone when the response already carries one, or when
    the view was decorated with @xframe_options_exempt (which sets the
    ``xframe_options_exempt`` attribute on the response).

    By default the header value is 'SAMEORIGIN', so the response may only be
    framed by pages on the same site; set X_FRAME_OPTIONS to 'DENY' in your
    Django settings to forbid framing entirely.

    Note: older browsers quietly ignore this header, so use additional
    clickjacking defences if those browsers matter to you.
    https://en.wikipedia.org/wiki/Clickjacking#Server_and_client
    """

    def process_response(self, request, response):
        # Respect an explicit header or a @xframe_options_exempt view.
        header_present = response.get('X-Frame-Options', None) is not None
        exempt = getattr(response, 'xframe_options_exempt', False)
        if not (header_present or exempt):
            response['X-Frame-Options'] = self.get_xframe_options_value(
                request, response)
        return response

    def get_xframe_options_value(self, request, response):
        """
        Return the value to use for the X-Frame-Options header.

        Reads the X_FRAME_OPTIONS Django setting (upper-cased), defaulting
        to 'SAMEORIGIN'. Override this method to vary the value based on
        the request or response.
        """
        configured = getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN')
        return configured.upper()
|
apache-2.0
|
CourseTalk/edx-platform
|
cms/djangoapps/contentstore/views/import_export.py
|
5
|
21168
|
"""
These views handle all actions in Studio related to import and exporting of
courses
"""
import base64
import logging
import os
import re
import shutil
import tarfile
from path import Path as path
from tempfile import mkdtemp
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import SuspiciousOperation, PermissionDenied
from django.core.files.temp import NamedTemporaryFile
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods, require_GET
import dogstats_wrapper as dog_stats_api
from edxmako.shortcuts import render_to_response
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import SerializationError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator
from xmodule.modulestore.xml_importer import import_course_from_xml, import_library_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml, export_library_to_xml
from xmodule.modulestore import COURSE_ROOT, LIBRARY_ROOT
from student.auth import has_course_author_access
from openedx.core.lib.extract_tar import safetar_extractall
from util.json_request import JsonResponse
from util.views import ensure_valid_course_key
from models.settings.course_metadata import CourseMetadata
from contentstore.views.entrance_exam import (
add_entrance_exam_milestone,
remove_entrance_exam_milestone_reference
)
from contentstore.utils import reverse_course_url, reverse_usage_url, reverse_library_url
# Public API of this module for ``from ... import *``.
__all__ = [
    'import_handler', 'import_status_handler',
    'export_handler',
]
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
# Regex to capture Content-Range header ranges.
# Matches "<start>-<stop>/<end>" (each field 1-11 digits); the named groups
# are consumed via .groupdict() when streaming chunked course uploads.
CONTENT_RE = re.compile(r"(?P<start>\d{1,11})-(?P<stop>\d{1,11})/(?P<end>\d{1,11})")
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@ensure_valid_course_key
def import_handler(request, course_key_string):
    """
    The restful handler for importing a course.
    GET
        html: return html page for import page
        json: not supported
    POST or PUT
        json: import a course via the .tar.gz file specified in request.FILES
    """
    courselike_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    # Select the library- or course-flavoured parameters, then delegate the
    # shared import machinery to _import_handler.
    if isinstance(courselike_key, LibraryLocator):
        params = (
            LIBRARY_ROOT,                                           # root_name
            reverse_library_url('library_handler', courselike_key),  # successful_url
            'context_library',                                       # context_name
            store.get_library(courselike_key),                       # courselike_module
            import_library_from_xml,                                 # import_func
        )
    else:
        params = (
            COURSE_ROOT,
            reverse_course_url('course_handler', courselike_key),
            'context_course',
            store.get_course(courselike_key),
            import_course_from_xml,
        )
    return _import_handler(request, courselike_key, *params)
def _import_handler(request, courselike_key, root_name, successful_url, context_name, courselike_module, import_func):
    """
    Parameterized function containing the meat of import_handler.

    Streams the uploaded .tar.gz to disk chunk by chunk (driven by the
    Content-Range request header), extracts it, locates ``root_name``
    (course.xml / library.xml) inside the extracted tree, and runs
    ``import_func`` on it. Progress is tracked in the session through
    _save_request_status, using stages 0..4; a negative stage records the
    stage at which the import failed.
    """
    if not has_course_author_access(request.user, courselike_key):
        raise PermissionDenied()
    if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
        if request.method == 'GET':
            raise NotImplementedError('coming soon')
        else:
            # Do everything in a try-except block to make sure everything is properly cleaned up.
            try:
                data_root = path(settings.GITHUB_REPO_ROOT)
                subdir = base64.urlsafe_b64encode(repr(courselike_key))
                course_dir = data_root / subdir
                filename = request.FILES['course-data'].name
                # Use sessions to keep info about import progress
                session_status = request.session.setdefault("import_status", {})
                # Session key for this upload: course key + uploaded filename.
                courselike_string = unicode(courselike_key) + filename
                _save_request_status(request, courselike_string, 0)
                # If the course has an entrance exam then remove it and its corresponding milestone.
                # current course state before import.
                if root_name == COURSE_ROOT:
                    if courselike_module.entrance_exam_enabled:
                        remove_entrance_exam_milestone_reference(request, courselike_key)
                        log.info(
                            "entrance exam milestone content reference for course %s has been removed",
                            courselike_module.id
                        )
                if not filename.endswith('.tar.gz'):
                    _save_request_status(request, courselike_string, -1)
                    return JsonResponse(
                        {
                            'ErrMsg': _('We only support uploading a .tar.gz file.'),
                            'Stage': -1
                        },
                        status=415
                    )
                temp_filepath = course_dir / filename
                if not course_dir.isdir():
                    os.mkdir(course_dir)
                logging.debug('importing course to {0}'.format(temp_filepath))
                # Get upload chunks byte ranges
                try:
                    matches = CONTENT_RE.search(request.META["HTTP_CONTENT_RANGE"])
                    content_range = matches.groupdict()
                except KeyError: # Single chunk
                    # no Content-Range header, so make one that will work
                    content_range = {'start': 0, 'stop': 1, 'end': 2}
                # stream out the uploaded files in chunks to disk
                # First chunk starts a fresh file; later chunks append.
                if int(content_range['start']) == 0:
                    mode = "wb+"
                else:
                    mode = "ab+"
                    size = os.path.getsize(temp_filepath)
                    # Check to make sure we haven't missed a chunk
                    # This shouldn't happen, even if different instances are handling
                    # the same session, but it's always better to catch errors earlier.
                    if size < int(content_range['start']):
                        _save_request_status(request, courselike_string, -1)
                        log.warning(
                            "Reported range %s does not match size downloaded so far %s",
                            content_range['start'],
                            size
                        )
                        return JsonResponse(
                            {
                                'ErrMsg': _('File upload corrupted. Please try again'),
                                'Stage': -1
                            },
                            status=409
                        )
                    # The last request sometimes comes twice. This happens because
                    # nginx sends a 499 error code when the response takes too long.
                    elif size > int(content_range['stop']) and size == int(content_range['end']):
                        return JsonResponse({'ImportStatus': 1})
                with open(temp_filepath, mode) as temp_file:
                    for chunk in request.FILES['course-data'].chunks():
                        temp_file.write(chunk)
                size = os.path.getsize(temp_filepath)
                if int(content_range['stop']) != int(content_range['end']) - 1:
                    # More chunks coming
                    return JsonResponse({
                        "files": [{
                            "name": filename,
                            "size": size,
                            "deleteUrl": "",
                            "deleteType": "",
                            "url": reverse_course_url('import_handler', courselike_key),
                            "thumbnailUrl": ""
                        }]
                    })
            # Send errors to client with stage at which error occurred.
            except Exception as exception: # pylint: disable=broad-except
                _save_request_status(request, courselike_string, -1)
                if course_dir.isdir():
                    shutil.rmtree(course_dir)
                    log.info("Course import %s: Temp data cleared", courselike_key)
                log.exception(
                    "error importing course"
                )
                return JsonResponse(
                    {
                        'ErrMsg': str(exception),
                        'Stage': -1
                    },
                    status=400
                )
            # try-finally block for proper clean up after receiving last chunk.
            try:
                # This was the last chunk.
                log.info("Course import %s: Upload complete", courselike_key)
                _save_request_status(request, courselike_string, 1)
                tar_file = tarfile.open(temp_filepath)
                try:
                    safetar_extractall(tar_file, (course_dir + '/').encode('utf-8'))
                except SuspiciousOperation as exc:
                    _save_request_status(request, courselike_string, -1)
                    return JsonResponse(
                        {
                            'ErrMsg': 'Unsafe tar file. Aborting import.',
                            'SuspiciousFileOperationMsg': exc.args[0],
                            'Stage': -1
                        },
                        status=400
                    )
                finally:
                    tar_file.close()
                log.info("Course import %s: Uploaded file extracted", courselike_key)
                _save_request_status(request, courselike_string, 2)
                # find the 'course.xml' file
                def get_all_files(directory):
                    """
                    For each file in the directory, yield a 2-tuple of (file-name,
                    directory-path)
                    """
                    for dirpath, _dirnames, filenames in os.walk(directory):
                        for filename in filenames:
                            yield (filename, dirpath)
                def get_dir_for_fname(directory, filename):
                    """
                    Returns the dirpath for the first file found in the directory
                    with the given name. If there is no file in the directory with
                    the specified name, return None.
                    """
                    for fname, dirpath in get_all_files(directory):
                        if fname == filename:
                            return dirpath
                    return None
                dirpath = get_dir_for_fname(course_dir, root_name)
                if not dirpath:
                    _save_request_status(request, courselike_string, -2)
                    return JsonResponse(
                        {
                            'ErrMsg': _('Could not find the {0} file in the package.').format(root_name),
                            'Stage': -2
                        },
                        status=415
                    )
                dirpath = os.path.relpath(dirpath, data_root)
                logging.debug('found %s at %s', root_name, dirpath)
                log.info("Course import %s: Extracted file verified", courselike_key)
                _save_request_status(request, courselike_string, 3)
                with dog_stats_api.timer(
                    'courselike_import.time',
                    tags=[u"courselike:{}".format(courselike_key)]
                ):
                    courselike_items = import_func(
                        modulestore(), request.user.id,
                        settings.GITHUB_REPO_ROOT, [dirpath],
                        load_error_modules=False,
                        static_content_store=contentstore(),
                        target_id=courselike_key
                    )
                new_location = courselike_items[0].location
                logging.debug('new course at %s', new_location)
                log.info("Course import %s: Course import successful", courselike_key)
                _save_request_status(request, courselike_string, 4)
            # Send errors to client with stage at which error occurred.
            except Exception as exception: # pylint: disable=broad-except
                log.exception(
                    "error importing course"
                )
                return JsonResponse(
                    {
                        'ErrMsg': str(exception),
                        'Stage': -session_status[courselike_string]
                    },
                    status=400
                )
            finally:
                if course_dir.isdir():
                    shutil.rmtree(course_dir)
                    log.info("Course import %s: Temp data cleared", courselike_key)
                # set failed stage number with negative sign in case of unsuccessful import
                if session_status[courselike_string] != 4:
                    _save_request_status(request, courselike_string, -abs(session_status[courselike_string]))
            # status == 4 represents that course has been imported successfully.
            if session_status[courselike_string] == 4 and root_name == COURSE_ROOT:
                # Reload the course so we have the latest state
                course = modulestore().get_course(courselike_key)
                if course.entrance_exam_enabled:
                    entrance_exam_chapter = modulestore().get_items(
                        course.id,
                        qualifiers={'category': 'chapter'},
                        settings={'is_entrance_exam': True}
                    )[0]
                    metadata = {'entrance_exam_id': unicode(entrance_exam_chapter.location)}
                    CourseMetadata.update_from_dict(metadata, course, request.user)
                    add_entrance_exam_milestone(course.id, entrance_exam_chapter)
                    log.info("Course %s Entrance exam imported", course.id)
            return JsonResponse({'Status': 'OK'})
    elif request.method == 'GET': # assume html
        status_url = reverse_course_url(
            "import_status_handler", courselike_key, kwargs={'filename': "fillerName"}
        )
        return render_to_response('import.html', {
            context_name: courselike_module,
            'successful_import_redirect_url': successful_url,
            'import_status_url': status_url,
            'library': isinstance(courselike_key, LibraryLocator)
        })
    else:
        return HttpResponseNotFound()
def _save_request_status(request, key, status):
"""
Save import status for a course in request session
"""
session_status = request.session.get('import_status')
if session_status is None:
session_status = request.session.setdefault("import_status", {})
session_status[key] = status
request.session.save()
@require_GET
@ensure_csrf_cookie
@login_required
@ensure_valid_course_key
def import_status_handler(request, course_key_string, filename=None):
    """
    Returns an integer corresponding to the status of a file import. These are:
    -X : Import unsuccessful due to some error with X as stage [0-3]
    0 : No status info found (import done or upload still in progress)
    1 : Extracting file
    2 : Validating.
    3 : Importing to mongo
    4 : Import successful
    """
    course_key = CourseKey.from_string(course_key_string)
    if not has_course_author_access(request.user, course_key):
        raise PermissionDenied()
    # A missing session entry at either level simply means "no status
    # info" (stage 0), so chain .get() lookups with that default.
    progress = request.session.get("import_status", {})
    status = progress.get(course_key_string + filename, 0)
    return JsonResponse({"ImportStatus": status})
def create_export_tarball(course_module, course_key, context):
    """
    Generate the gzipped export tarball for a course or library.

    On success, returns the open NamedTemporaryFile containing the .tar.gz.
    On failure, updates ``context`` with error information for the export
    page and re-raises the original exception (note: it does not return
    None on error — the export is written to XML in a temporary working
    directory, tarred up, and the working directory is always removed).
    """
    name = course_module.url_name
    # The tarball lives in a NamedTemporaryFile; the XML tree is exported
    # into a separate temp working directory that is cleaned up below.
    export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
    root_dir = path(mkdtemp())
    try:
        if isinstance(course_key, LibraryLocator):
            export_library_to_xml(modulestore(), contentstore(), course_key, root_dir, name)
        else:
            export_course_to_xml(modulestore(), contentstore(), course_module.id, root_dir, name)
        logging.debug(u'tar file being generated at %s', export_file.name)
        with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
            tar_file.add(root_dir / name, arcname=name)
    except SerializationError as exc:
        log.exception(u'There was an error exporting %s', course_key)
        # Try to pin down which block failed (and its containing vertical,
        # if any) so the error page can link straight to it.
        unit = None
        failed_item = None
        parent = None
        try:
            failed_item = modulestore().get_item(exc.location)
            parent_loc = modulestore().get_parent_location(failed_item.location)
            if parent_loc is not None:
                parent = modulestore().get_item(parent_loc)
                if parent.location.category == 'vertical':
                    unit = parent
        except: # pylint: disable=bare-except
            # if we have a nested exception, then we'll show the more generic error message
            pass
        context.update({
            'in_err': True,
            'raw_err_msg': str(exc),
            'failed_module': failed_item,
            'unit': unit,
            'edit_unit_url': reverse_usage_url("container_handler", parent.location) if parent else "",
        })
        raise
    except Exception as exc:
        log.exception('There was an error exporting %s', course_key)
        context.update({
            'in_err': True,
            'unit': None,
            'raw_err_msg': str(exc)})
        raise
    finally:
        # Always remove the extracted XML tree; the tarball file survives
        # (its NamedTemporaryFile is returned to — and owned by — the caller).
        shutil.rmtree(root_dir / name)
    return export_file
def send_tarball(tarball):
    """
    Build the HTTP response that streams a tar.gz file to the user as an
    attachment (content type application/x-tgz).
    """
    attachment_name = os.path.basename(tarball.name.encode('utf-8'))
    response = HttpResponse(FileWrapper(tarball), content_type='application/x-tgz')
    response['Content-Disposition'] = 'attachment; filename=%s' % attachment_name
    response['Content-Length'] = os.path.getsize(tarball.name)
    return response
@ensure_csrf_cookie
@login_required
@require_http_methods(("GET",))
@ensure_valid_course_key
def export_handler(request, course_key_string):
    """
    The restful handler for exporting a course.
    GET
        html: return html page for import page
        application/x-tgz: return tar.gz file containing exported course
        json: not supported
    Note that there are 2 ways to request the tar.gz file. The request header can specify
    application/x-tgz via HTTP_ACCEPT, or a query parameter can be used (?_accept=application/x-tgz).
    If the tar.gz file has been requested but the export operation fails, an HTML page will be returned
    which describes the error.
    """
    course_key = CourseKey.from_string(course_key_string)
    export_url = reverse_course_url('export_handler', course_key)
    if not has_course_author_access(request.user, course_key):
        raise PermissionDenied()
    # Build the template context for the library or course flavour.
    if isinstance(course_key, LibraryLocator):
        courselike_module = modulestore().get_library(course_key)
        context = {
            'context_library': courselike_module,
            'courselike_home_url': reverse_library_url("library_handler", course_key),
            'library': True,
        }
    else:
        courselike_module = modulestore().get_course(course_key)
        context = {
            'context_course': courselike_module,
            'courselike_home_url': reverse_course_url("course_handler", course_key),
            'library': False,
        }
    context['export_url'] = export_url + '?_accept=application/x-tgz'
    # an _accept URL parameter will be preferred over HTTP_ACCEPT in the header.
    requested_format = request.GET.get('_accept', request.META.get('HTTP_ACCEPT', 'text/html'))
    if 'application/x-tgz' in requested_format:
        try:
            tarball = create_export_tarball(courselike_module, course_key, context)
        except SerializationError:
            # create_export_tarball already stashed the error details in context.
            return render_to_response('export.html', context)
        return send_tarball(tarball)
    if 'text/html' in requested_format:
        return render_to_response('export.html', context)
    # Only HTML or x-tgz request formats are supported (no JSON).
    return HttpResponse(status=406)
|
agpl-3.0
|
MindsIgnited/libjingle
|
talk/site_scons/site_tools/talk_linux.py
|
22
|
11731
|
# Copyright 2010 Google Inc.
# All Rights Reserved.
# Author: tschmelcher@google.com (Tristan Schmelcher)
"""Tool for helpers used in linux building process."""
import os
import SCons.Defaults
import subprocess
def _OutputFromShellCommand(command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return process.communicate()[0].strip()
# This is a pure SCons helper function.
def _InternalBuildDebianPackage(env, debian_files, package_files,
                                output_dir=None, force_version=None):
  """Creates build rules to build a Debian package from the specified sources.
  Args:
    env: SCons Environment.
    debian_files: Array of the Debian control file sources that should be
        copied into the package source tree, e.g., changelog, control, rules,
        etc.
    package_files: An array of 2-tuples listing the files that should be
        copied into the package source tree.
        The first element is the path where the file should be placed for the
        .install control file to find it, relative to the generated debian
        package source directory.
        The second element is the file source.
    output_dir: An optional directory to place the files in. If omitted, the
        current output directory is used.
    force_version: Optional. Forces the version of the package to start with
        this version string if specified. If the last entry in the changelog
        is not for a version that starts with this then a dummy entry is
        generated with this version and a ~prerelease suffix (so that the
        final version will compare as greater).
  Return:
    A list of the targets (if any).
  """
  # Skip cleanly on machines without the Debian toolchain installed.
  if 0 != subprocess.call(['which', 'dpkg-buildpackage']):
    print ('dpkg-buildpackage not installed on this system; '
           'skipping DEB build stage')
    return []
  # Read the control file and changelog file to determine the package name,
  # version, and arch that the Debian build tools will use to name the
  # generated files.
  control_file = None
  changelog_file = None
  for file in debian_files:
    if os.path.basename(file) == 'control':
      control_file = env.File(file).srcnode().abspath
    elif os.path.basename(file) == 'changelog':
      changelog_file = env.File(file).srcnode().abspath
  if not control_file:
    raise Exception('Need to have a control file')
  if not changelog_file:
    raise Exception('Need to have a changelog file')
  # Shell out to awk/sed/dpkg to parse the control metadata the same way the
  # Debian tools themselves would.
  source = _OutputFromShellCommand(
      "awk '/^Source:/ { print $2; }' " + control_file)
  packages = _OutputFromShellCommand(
      "awk '/^Package:/ { print $2; }' " + control_file).split('\n')
  version = _OutputFromShellCommand(
      "sed -nr '1 { s/.*\\((.*)\\).*/\\1/; p }' " + changelog_file)
  arch = _OutputFromShellCommand('dpkg --print-architecture')
  add_dummy_changelog_entry = False
  if force_version and not version.startswith(force_version):
    print ('Warning: no entry in ' + changelog_file + ' for version ' +
           force_version + ' (last is ' + version +'). A dummy entry will be ' +
           'generated. Remember to add the real changelog entry before ' +
           'releasing.')
    version = force_version + '~prerelease'
    add_dummy_changelog_entry = True
  source_dir_name = source + '_' + version + '_' + arch
  target_file_names = [ source_dir_name + '.changes' ]
  for package in packages:
    package_file_name = package + '_' + version + '_' + arch + '.deb'
    target_file_names.append(package_file_name)
  # The targets
  if output_dir:
    targets = [os.path.join(output_dir, s) for s in target_file_names]
  else:
    targets = target_file_names
  # Path to where we will construct the debian build tree.
  deb_build_tree = os.path.join(source_dir_name, 'deb_build_tree')
  # First copy the files.
  for file in package_files:
    env.Command(os.path.join(deb_build_tree, file[0]), file[1],
                SCons.Defaults.Copy('$TARGET', '$SOURCE'))
    env.Depends(targets, os.path.join(deb_build_tree, file[0]))
  # Now copy the Debian metadata sources. We have to do this all at once so
  # that we can remove the target directory before copying, because there
  # can't be any other stale files there or else dpkg-buildpackage may use
  # them and give incorrect build output.
  copied_debian_files_paths = []
  for file in debian_files:
    copied_debian_files_paths.append(os.path.join(deb_build_tree, 'debian',
                                                  os.path.basename(file)))
  # NOTE: '$$' below escapes '$' from SCons so the shell sees a single '$'.
  copy_commands = [
      """dir=$$(dirname $TARGET) && \
         rm -Rf $$dir && \
         mkdir -p $$dir && \
         cp $SOURCES $$dir && \
         chmod -R u+w $$dir"""
  ]
  if add_dummy_changelog_entry:
    copy_commands += [
        """debchange -c $$(dirname $TARGET)/changelog --newversion %s \
           --distribution UNRELEASED \
           'Developer preview build. (This entry was auto-generated.)'""" %
        version
    ]
  env.Command(copied_debian_files_paths, debian_files, copy_commands)
  env.Depends(targets, copied_debian_files_paths)
  # Must explicitly specify -a because otherwise cross-builds won't work.
  # Must explicitly specify -D because -a disables it.
  # Must explicitly specify fakeroot because old dpkg tools don't assume that.
  env.Command(targets, None,
              """dir=%(dir)s && \
                 cd $$dir && \
                 dpkg-buildpackage -b -uc -a%(arch)s -D -rfakeroot && \
                 cd $$OLDPWD && \
                 for file in %(targets)s; do \
                   mv $$dir/../$$file $$(dirname $TARGET) || exit 1; \
                 done""" %
              {'dir':env.Dir(deb_build_tree).path,
               'arch':arch,
               'targets':' '.join(target_file_names)})
  return targets
def BuildDebianPackage(env, debian_files, package_files, force_version=None):
  """Hammer-aware wrapper that builds a Debian package into $STAGING_DIR.

  Delegates to _InternalBuildDebianPackage, fixing the output directory to
  the Hammer staging dir. No-op (returns []) on non-Linux hosts.

  Args:
    env: SCons Environment.
    debian_files: Debian control file sources (changelog, control, rules,
        etc.) to copy into the package source tree.
    package_files: 2-tuples of (path inside the generated debian source
        directory, file source) for the payload files.
    force_version: Optional version string the package version must start
        with; a dummy ~prerelease changelog entry is generated otherwise.

  Return:
    A list of the targets (if any).
  """
  if not env.Bit('host_linux'):
    return []
  return _InternalBuildDebianPackage(
      env, debian_files, package_files,
      output_dir='$STAGING_DIR', force_version=force_version)
def _GetPkgConfigCommand():
"""Return the pkg-config command line to use.
Returns:
A string specifying the pkg-config command line to use.
"""
return os.environ.get('PKG_CONFIG') or 'pkg-config'
def _EscapePosixShellArgument(arg):
"""Escapes a shell command line argument so that it is interpreted literally.
Args:
arg: The shell argument to escape.
Returns:
The escaped string.
"""
return "'%s'" % arg.replace("'", "'\\''")
def _HavePackage(package):
  """Whether the given pkg-config package is present on the build system.

  Args:
    package: The name of the package.
  Returns:
    True if the package is present, else False.
  """
  command = '%s --exists %s' % (_GetPkgConfigCommand(),
                                _EscapePosixShellArgument(package))
  return subprocess.call(command, shell=True) == 0
def _GetPackageFlags(flag_type, packages):
  """Query pkg-config for the flags needed by the given package(s).

  Args:
    flag_type: The pkg-config option selecting the flag type
        (e.g. '--cflags' or '--libs').
    packages: The list of package names as strings.
  Returns:
    The flags of the requested type, as a list of strings.
  Raises:
    subprocess.CalledProcessError: The pkg-config command failed.
  """
  pkg_config = _GetPkgConfigCommand()
  args = [pkg_config]
  for arg in [flag_type] + packages:
    args.append(_EscapePosixShellArgument(arg))
  proc = subprocess.Popen(' '.join(args), shell=True, stdout=subprocess.PIPE)
  output = proc.communicate()[0]
  if proc.returncode != 0:
    raise subprocess.CalledProcessError(proc.returncode, pkg_config)
  return output.strip().split(' ')
def GetPackageParams(env, packages):
  """Get the params needed to compile/link against the given package(s).

  Args:
    env: The current SCons environment.
    packages: The name of the package, or a list of names.
  Returns:
    A dictionary with 'ccflags', 'libs', 'libdirs', 'link_flags' and
    'dependent_target_settings' entries ({} on non-Linux hosts).
  Raises:
    Exception: One or more of the packages is not installed.
  """
  if not env.Bit('host_linux'):
    return {}
  if not SCons.Util.is_List(packages):
    packages = [packages]
  for package in packages:
    if not _HavePackage(package):
      raise Exception(('Required package \"%s\" was not found. Please install '
                       'the package that provides the \"%s.pc\" file.') %
                      (package, package))
  ccflags = _GetPackageFlags('--cflags', packages)
  raw_lib_flags = _GetPackageFlags('--libs', packages)
  # Partition the linker output into -l libs, -L libdirs, and the rest.
  # (Some linkers are confused when these appear in a mixed order, so they
  # are fed back to SCons separately.)
  libs = []
  libdirs = []
  link_flags = []
  for flag in raw_lib_flags:
    if flag.startswith('-l'):
      libs.append(flag[2:])
    elif flag.startswith('-L'):
      libdirs.append(flag[2:])
    else:
      link_flags.append(flag)
  return {
      'ccflags': ccflags,
      'libs': libs,
      'libdirs': libdirs,
      'link_flags': link_flags,
      'dependent_target_settings': {
          'libs': list(libs),
          'libdirs': list(libdirs),
          'link_flags': list(link_flags),
      },
  }
def EnableFeatureWherePackagePresent(env, bit, cpp_flag, package):
  """Enable a feature if a required pkg-config package is present.

  Args:
    env: The current SCons environment.
    bit: The name of the Bit to enable when the package is present.
    cpp_flag: The CPP flag to enable when the package is present.
    package: The name of the package.
  """
  if not env.Bit('host_linux'):
    return
  if _HavePackage(package):
    env.SetBits(bit)
    env.Append(CPPDEFINES=[cpp_flag])
  else:
    # Bug fix: the %-formatting must happen inside the call parentheses.
    # The previous form `print (...) % (...)` only worked as a Python 2
    # print *statement*; under Python 3 it formats the None returned by
    # print() and raises TypeError. This form works in both.
    print(('Warning: Package \"%s\" not found. Feature \"%s\" will not be '
           'built. To build with this feature, install the package that '
           'provides the \"%s.pc\" file.') % (package, bit, package))
def GetGccVersion(env):
  """Return the compiler version as a tuple of ints, e.g. (4, 8, 2).

  Probes $CXX when cross-compiling (so the target toolchain is queried),
  otherwise the host gcc; parses the first line of `--version` output.
  """
  if env.Bit('cross_compile'):
    compiler = env['CXX']
  else:
    compiler = 'gcc'
  raw = _OutputFromShellCommand(
      '%s --version | head -n 1 |'
      r'sed "s/.*\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/\1/g"' % compiler)
  return tuple(int(part or '0') for part in raw.split('.'))
def generate(env):
  # Standard SCons tool entry point: attach the public helpers as
  # environment methods so SConscripts can call e.g. env.GetPackageParams().
  # NOTE(review): this guards on Bit('linux') while the helpers themselves
  # check Bit('host_linux') -- confirm the two bits are equivalent here.
  if env.Bit('linux'):
    env.AddMethod(EnableFeatureWherePackagePresent)
    env.AddMethod(GetPackageParams)
    env.AddMethod(BuildDebianPackage)
    env.AddMethod(GetGccVersion)
def exists(env):
  """SCons tool protocol hook; this tool is always available."""
  return 1  # Required by scons
|
bsd-3-clause
|
vCentre/vFRP-6233
|
frappe/tests/test_email.py
|
37
|
3558
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.test_runner import make_test_records
# Create the User / Email Account fixture records once at import time so
# every test in this module can rely on them existing.
make_test_records("User")
make_test_records("Email Account")
class TestEmail(unittest.TestCase):
    """Tests for frappe's outgoing mail and the Bulk Email queue.

    Uses the deprecated-alias fix: `assertEquals` (removed in Python 3.12)
    is replaced by `assertEqual` throughout; behaviour is unchanged.
    """

    def setUp(self):
        # Start every test with an empty unsubscribe list and bulk queue.
        frappe.db.sql("""delete from `tabEmail Unsubscribe`""")
        frappe.db.sql("""delete from `tabBulk Email`""")

    def test_send(self):
        """A direct sendmail call should not raise."""
        from frappe.email import sendmail
        sendmail('test@example.com', subject='Test Mail', msg="Test Content")

    def test_bulk(self, send_after=None):
        """Queueing a bulk mail creates one 'Not Sent' row per recipient."""
        from frappe.email.bulk import send
        send(recipients=['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            reference_doctype='User', reference_name='Administrator',
            subject='Testing Bulk', message='This is a bulk mail!', send_after=send_after)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""", as_dict=1)
        self.assertEqual(len(bulk), 2)
        self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('Unsubscribe' in bulk[0]['message'])

    def test_flush(self):
        """Mails scheduled for the future must not be sent by flush()."""
        self.test_bulk(send_after=1)
        from frappe.email.bulk import flush
        flush(from_test=True)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Sent'""", as_dict=1)
        self.assertEqual(len(bulk), 0)

    def test_send_after(self):
        """Mails with no schedule are sent immediately by flush()."""
        self.test_bulk()
        from frappe.email.bulk import flush
        flush(from_test=True)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Sent'""", as_dict=1)
        self.assertEqual(len(bulk), 2)
        self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])

    def test_expired(self):
        """Queued mails older than the expiry window are marked Expired."""
        self.test_bulk()
        # Backdate the queue entries so flush() treats them as stale.
        frappe.db.sql("update `tabBulk Email` set creation='2010-01-01 12:00:00'")
        from frappe.email.bulk import flush
        flush(from_test=True)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Expired'""", as_dict=1)
        self.assertEqual(len(bulk), 2)
        self.assertTrue('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])

    def test_unsubscribe(self):
        """Unsubscribed recipients are skipped when a bulk mail is queued."""
        from frappe.email.bulk import unsubscribe, send
        unsubscribe(doctype="User", name="Administrator", email="test@example.com")
        self.assertTrue(frappe.db.get_value("Email Unsubscribe",
            {"reference_doctype": "User", "reference_name": "Administrator", "email": "test@example.com"}))
        send(recipients=['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            reference_doctype='User', reference_name="Administrator",
            subject='Testing Bulk', message='This is a bulk mail!')
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""",
            as_dict=1)
        self.assertEqual(len(bulk), 1)
        self.assertFalse('test@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('test1@example.com' in [d['recipient'] for d in bulk])
        self.assertTrue('Unsubscribe' in bulk[0]['message'])

    def test_bulk_limit(self):
        """Exceeding the per-call recipient limit raises BulkLimitCrossedError."""
        from frappe.email.bulk import send, BulkLimitCrossedError
        self.assertRaises(BulkLimitCrossedError, send,
            recipients=['test@example.com']*1000,
            sender="admin@example.com",
            reference_doctype="User", reference_name="Administrator",
            subject='Testing Bulk', message='This is a bulk mail!')
if __name__=='__main__':
    # Allow running this module directly; the frappe test runner normally
    # provides the site connection.
    frappe.connect()
    unittest.main()
|
mit
|
fenginx/django
|
tests/auth_tests/client.py
|
85
|
1414
|
import re
from django.contrib.auth.views import (
INTERNAL_RESET_SESSION_TOKEN, INTERNAL_RESET_URL_TOKEN,
)
from django.test import Client
def extract_token_from_url(url):
    """Return the token segment of a password-reset URL, or None.

    Matches URLs of the form '/reset/<uidb64>/<token>/'.
    """
    match = re.search(r'/reset/.*/(.+?)/', url)
    return match.group(1) if match else None
class PasswordResetConfirmClient(Client):
    """
    Test client that emulates PasswordResetConfirmView's token handling.

    The real view stores the reset token in the session and redirects to a
    URL containing a placeholder; this client performs the same substitution
    so a test can hit the final URL in a single request. This request puts
    'my-token' in the session and redirects to '/reset/bla/set-password/':

    >>> client = PasswordResetConfirmClient()
    >>> client.get('/reset/bla/my-token/')
    """

    def _get_password_reset_confirm_redirect_url(self, url):
        token = extract_token_from_url(url)
        if token is None:
            return url
        # Stash the token where the view expects to find it.
        session = self.session
        session[INTERNAL_RESET_SESSION_TOKEN] = token
        session.save()
        return url.replace(token, INTERNAL_RESET_URL_TOKEN)

    def get(self, path, *args, **kwargs):
        return super().get(
            self._get_password_reset_confirm_redirect_url(path), *args, **kwargs)

    def post(self, path, *args, **kwargs):
        return super().post(
            self._get_password_reset_confirm_redirect_url(path), *args, **kwargs)
|
bsd-3-clause
|
bitcommoditiz/P2Pool
|
p2pool/__init__.py
|
278
|
1595
|
import os
import re
import sys
import traceback
import subprocess
def check_output(*popenargs, **kwargs):
    """Backport of subprocess.check_output: run a command, return stdout.

    Raises ValueError((retcode, output)) on a nonzero exit status instead
    of CalledProcessError, preserving this module's historical contract.
    """
    proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, _ = proc.communicate()
    if proc.returncode:
        raise ValueError((proc.returncode, output))
    return output
def _get_version():
    """Best-effort detection of the running p2pool version string.

    Tries, in order: `git describe` (and `git.cmd` for Windows msysgit),
    reading .git/HEAD directly, then parsing the directory name of an
    unpacked release tarball. Never raises: on total failure it returns an
    'unknown ...' marker instead.

    Fix: `except Exception, e` is Python-2-only syntax (a SyntaxError on
    Python 3); the `as` form works on Python 2.6+ as well. Bare `except:`
    clauses are narrowed to `except Exception:` so KeyboardInterrupt is
    not swallowed during startup.
    """
    try:
        try:
            return check_output(['git', 'describe', '--always', '--dirty'], cwd=os.path.dirname(os.path.abspath(sys.argv[0]))).strip()
        except Exception:
            # git not installed or not a git checkout -- fall through.
            pass
        try:
            # Windows msysgit installs the wrapper as git.cmd.
            return check_output(['git.cmd', 'describe', '--always', '--dirty'], cwd=os.path.dirname(os.path.abspath(sys.argv[0]))).strip()
        except Exception:
            pass
        root_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
        git_dir = os.path.join(root_dir, '.git')
        if os.path.exists(git_dir):
            # Read the commit hash straight out of the .git metadata.
            head = open(os.path.join(git_dir, 'HEAD')).read().strip()
            prefix = 'ref: '
            if head.startswith(prefix):
                path = head[len(prefix):].split('/')
                return open(os.path.join(git_dir, *path)).read().strip()[:7]
            else:
                return head[:7]
        dir_name = os.path.split(root_dir)[1]
        match = re.match('p2pool-([.0-9]+)', dir_name)
        if match:
            return match.groups()[0]
        # NOTE: str.encode('hex') is Python 2 only, matching the rest of
        # this (Python 2) codebase.
        return 'unknown %s' % (dir_name.encode('hex'),)
    except Exception as e:
        traceback.print_exc()
        return 'unknown %s' % (str(e).encode('hex'),)
# Resolved once at import time; other modules report p2pool.__version__.
__version__ = _get_version()

# Master debug switch read throughout the codebase.
DEBUG = True
|
gpl-3.0
|
PredictiveScienceLab/inverse-bgo
|
pydes/_prior.py
|
2
|
1157
|
"""
Some priors for GPy.
Author:
Ilias Bilionis
Date:
5/5/2015
"""
__all__ = ['LogLogisticPrior', 'JeffreysPrior']
import GPy
import numpy as np
class LogLogisticPrior(GPy.priors.Prior):
    """
    Log-Logistic prior suitable for lengthscale parameters.

    From Conti & O'Hagan (2010)
    """

    # The parameter is constrained positive (appropriate for lengthscales).
    domain = GPy.priors._POSITIVE

    def __init__(self):
        """
        Initialize the object.
        """
        # Stateless prior: nothing to configure.
        pass

    def __str__(self):
        return 'LogLog'

    def lnpdf(self, x):
        # Unnormalized log-density: -log(1 + x^2), i.e. p(x) proportional
        # to 1 / (1 + x^2).
        # NOTE(review): this is the half-Cauchy form, which does not match
        # the log-logistic(1, 1) distribution sampled by rvs() below --
        # confirm which density is intended.
        return -np.log(1. + x ** 2)

    def lnpdf_grad(self, x):
        # d/dx of lnpdf: -2x / (1 + x^2).
        return -2. * x / (1. + x ** 2)

    def rvs(self, n):
        # exp of a standard logistic variate is log-logistic(1, 1)
        # distributed.
        return np.exp(np.random.logistic(size=n))
class JeffreysPrior(GPy.priors.Prior):
    """
    The uninformative Jeffrey's prior used for scale parameters.
    """

    # Scale parameters are constrained positive.
    domain = GPy.priors._POSITIVE

    def __init__(self):
        """
        Initialize the object.
        """
        # Stateless prior: nothing to configure.
        pass

    def __str__(self):
        return 'JeffreysPrior()'

    def lnpdf(self, x):
        # Jeffreys prior for a scale parameter: p(x) proportional to 1/x,
        # so log p(x) = -log x (up to an additive constant; the prior is
        # improper).
        return -np.log(x)

    def lnpdf_grad(self, x):
        # d/dx of -log x.
        return -1. / x

    def rvs(self, n):
        # NOTE(review): returns the constant 1.0 rather than a random draw
        # (the prior is improper, so it cannot be sampled) -- presumably a
        # placeholder for callers that require an rvs(); confirm.
        return np.ones(n)
|
mit
|
jamesob/bitcoin
|
test/functional/wallet_upgradewallet.py
|
13
|
16487
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""upgradewallet RPC functional test
Test upgradewallet RPC. Download node binaries:
test/get_previous_releases.py -b v0.19.1 v0.18.1 v0.17.2 v0.16.3 v0.15.2
Only v0.15.2 and v0.16.3 are required by this test. The others are used in feature_backwards_compatibility.py
"""
import os
import shutil
import struct
from io import BytesIO
from test_framework.bdb import dump_bdb_kv
from test_framework.messages import deser_compact_size, deser_string
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_is_hex_string,
sha256sum_file,
)
UPGRADED_KEYMETA_VERSION = 12
def deser_keymeta(f):
    """Deserialize a wallet CKeyMetadata record from the byte stream *f*.

    Returns (version, create_time, keypath string, seed id, fingerprint,
    path length, path as list of uint32, has_key_origin flag). The key
    origin fields are only present from UPGRADED_KEYMETA_VERSION (12) on.
    """
    ver, create_time = struct.unpack('<Iq', f.read(12))
    kp_str = deser_string(f)
    seed_id = f.read(20)
    fpr = f.read(4)
    path_len = 0
    path = []
    has_key_orig = False
    if ver == UPGRADED_KEYMETA_VERSION:
        path_len = deser_compact_size(f)
        for i in range(0, path_len):
            path.append(struct.unpack('<I', f.read(4))[0])
        # Bug fix: bool(f.read(1)) is True for ANY byte read -- including
        # b'\x00' -- because non-empty bytes objects are truthy. Compare
        # the byte's value instead (empty read still maps to False).
        has_key_orig = f.read(1) not in (b'', b'\x00')
    return ver, create_time, kp_str, seed_id, fpr, path_len, path, has_key_orig
class UpgradeWalletTest(BitcoinTestFramework):
    """Functional test for the upgradewallet RPC across legacy wallet versions."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [
            ["-addresstype=bech32", "-keypool=2"], # current wallet version
            ["-usehd=1", "-keypool=2"],            # v0.16.3 wallet
            ["-usehd=0", "-keypool=2"]             # v0.15.2 wallet
        ]
        self.wallet_names = [self.default_wallet_name, None, None]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
        self.skip_if_no_bdb()
        self.skip_if_no_previous_releases()

    def setup_network(self):
        self.setup_nodes()

    def setup_nodes(self):
        # Node 0 runs the current binary; nodes 1 and 2 run the historic
        # v0.16.3 and v0.15.2 release binaries respectively.
        self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
            None,
            160300,
            150200,
        ])
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()

    def dumb_sync_blocks(self):
        """
        Little helper to sync older wallets.
        Notice that v0.15.2's regtest is hardforked, so there is
        no sync for it.
        v0.15.2 is only being used to test for version upgrade
        and master hash key presence.
        v0.16.3 is being used to test for version upgrade and balances.
        Further info: https://github.com/bitcoin/bitcoin/pull/18774#discussion_r416967844
        """
        node_from = self.nodes[0]
        v16_3_node = self.nodes[1]
        to_height = node_from.getblockcount()
        height = self.nodes[1].getblockcount()
        for i in range(height, to_height+1):
            b = node_from.getblock(blockhash=node_from.getblockhash(i), verbose=0)
            v16_3_node.submitblock(b)
        assert_equal(v16_3_node.getblockcount(), to_height)

    def test_upgradewallet(self, wallet, previous_version, requested_version=None, expected_version=None):
        """Upgrade *wallet* and check the RPC result and resulting version.

        When expected_version equals previous_version the upgrade is a
        no-op and the 'unchanged' result message is expected.
        """
        unchanged = expected_version == previous_version
        new_version = previous_version if unchanged else expected_version if expected_version else requested_version
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
        assert_equal(wallet.upgradewallet(requested_version),
            {
                "wallet_name": "",
                "previous_version": previous_version,
                "current_version": new_version,
                "result": "Already at latest version. Wallet version unchanged." if unchanged else "Wallet upgraded successfully from version {} to version {}.".format(previous_version, new_version),
            }
        )
        assert_equal(wallet.getwalletinfo()["walletversion"], new_version)

    def test_upgradewallet_error(self, wallet, previous_version, requested_version, msg):
        """Attempt an invalid upgrade and check the error; version must not change."""
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)
        assert_equal(wallet.upgradewallet(requested_version),
            {
                "wallet_name": "",
                "previous_version": previous_version,
                "current_version": previous_version,
                "error": msg,
            }
        )
        assert_equal(wallet.getwalletinfo()["walletversion"], previous_version)

    def run_test(self):
        self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
        self.dumb_sync_blocks()
        # Sanity check the test framework:
        res = self.nodes[0].getblockchaininfo()
        assert_equal(res['blocks'], 101)
        node_master = self.nodes[0]
        v16_3_node = self.nodes[1]
        v15_2_node = self.nodes[2]
        # Send coins to old wallets for later conversion checks.
        v16_3_wallet = v16_3_node.get_wallet_rpc('wallet.dat')
        v16_3_address = v16_3_wallet.getnewaddress()
        node_master.generatetoaddress(101, v16_3_address)
        self.dumb_sync_blocks()
        v16_3_balance = v16_3_wallet.getbalance()
        self.log.info("Test upgradewallet RPC...")
        # Prepare for copying of the older wallet
        node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets", self.default_wallet_name)
        node_master_wallet = os.path.join(node_master_wallet_dir, self.default_wallet_name, self.wallet_data_filename)
        v16_3_wallet = os.path.join(v16_3_node.datadir, "regtest/wallets/wallet.dat")
        v15_2_wallet = os.path.join(v15_2_node.datadir, "regtest/wallet.dat")
        split_hd_wallet = os.path.join(v15_2_node.datadir, "regtest/splithd")
        self.stop_nodes()
        # Make split hd wallet
        self.start_node(2, ['-usehd=1', '-keypool=2', '-wallet=splithd'])
        self.stop_node(2)

        def copy_v16():
            # Swap the master node's wallet for a fresh copy of the 0.16.3 one.
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.16.3 wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                v16_3_wallet,
                node_master_wallet_dir
            )
            node_master.loadwallet(self.default_wallet_name)

        def copy_non_hd():
            # Swap in a fresh copy of the 0.15.2 non-HD wallet.
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.15.2 non hd wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                v15_2_wallet,
                node_master_wallet_dir
            )
            node_master.loadwallet(self.default_wallet_name)

        def copy_split_hd():
            # Swap in a fresh copy of the 0.15.2 split-HD wallet.
            node_master.get_wallet_rpc(self.default_wallet_name).unloadwallet()
            # Copy the 0.15.2 split hd wallet to the last Bitcoin Core version and open it:
            shutil.rmtree(node_master_wallet_dir)
            os.mkdir(node_master_wallet_dir)
            shutil.copy(
                split_hd_wallet,
                os.path.join(node_master_wallet_dir, 'wallet.dat')
            )
            node_master.loadwallet(self.default_wallet_name)

        self.restart_node(0)
        copy_v16()
        wallet = node_master.get_wallet_rpc(self.default_wallet_name)
        self.log.info("Test upgradewallet without a version argument")
        self.test_upgradewallet(wallet, previous_version=159900, expected_version=169900)
        # wallet should still contain the same balance
        assert_equal(wallet.getbalance(), v16_3_balance)
        copy_non_hd()
        wallet = node_master.get_wallet_rpc(self.default_wallet_name)
        # should have no master key hash before conversion
        assert_equal('hdseedid' in wallet.getwalletinfo(), False)
        self.log.info("Test upgradewallet with explicit version number")
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
        # after conversion master key hash should be present
        assert_is_hex_string(wallet.getwalletinfo()['hdseedid'])
        self.log.info("Intermediary versions don't effect anything")
        copy_non_hd()
        # Wallet starts with 60000
        assert_equal(60000, wallet.getwalletinfo()['walletversion'])
        wallet.unloadwallet()
        before_checksum = sha256sum_file(node_master_wallet)
        node_master.loadwallet('')
        # Test an "upgrade" from 60000 to 129999 has no effect, as the next version is 130000
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=129999, expected_version=60000)
        wallet.unloadwallet()
        assert_equal(before_checksum, sha256sum_file(node_master_wallet))
        node_master.loadwallet('')
        self.log.info('Wallets cannot be downgraded')
        copy_non_hd()
        self.test_upgradewallet_error(wallet, previous_version=60000, requested_version=40000,
            msg="Cannot downgrade wallet from version 60000 to version 40000. Wallet version unchanged.")
        wallet.unloadwallet()
        assert_equal(before_checksum, sha256sum_file(node_master_wallet))
        node_master.loadwallet('')
        self.log.info('Can upgrade to HD')
        # Inspect the old wallet and make sure there is no hdchain
        orig_kvs = dump_bdb_kv(node_master_wallet)
        assert b'\x07hdchain' not in orig_kvs
        # Upgrade to HD, no split
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=130000)
        # Check that there is now a hd chain and it is version 1, no internal chain counter
        new_kvs = dump_bdb_kv(node_master_wallet)
        assert b'\x07hdchain' in new_kvs
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(28, len(hd_chain))
        hd_chain_version, external_counter, seed_id = struct.unpack('<iI20s', hd_chain)
        assert_equal(1, hd_chain_version)
        # seed_id is stored little-endian; reverse to compare with RPC output.
        seed_id = bytearray(seed_id)
        seed_id.reverse()
        old_kvs = new_kvs
        # First 2 keys should still be non-HD
        for i in range(0, 2):
            info = wallet.getaddressinfo(wallet.getnewaddress())
            assert 'hdkeypath' not in info
            assert 'hdseedid' not in info
        # Next key should be HD
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert_equal(seed_id.hex(), info['hdseedid'])
        assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
        prev_seed_id = info['hdseedid']
        # Change key should be the same keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/1\'', info['hdkeypath'])
        self.log.info('Cannot upgrade to HD Split, needs Pre Split Keypool')
        for version in [139900, 159900, 169899]:
            self.test_upgradewallet_error(wallet, previous_version=130000, requested_version=version,
                msg="Cannot upgrade a non HD split wallet from version {} to version {} without upgrading to "
                    "support pre-split keypool. Please use version 169900 or no version specified.".format(130000, version))
        self.log.info('Upgrade HD to HD chain split')
        self.test_upgradewallet(wallet, previous_version=130000, requested_version=169900)
        # Check that the hdchain updated correctly
        new_kvs = dump_bdb_kv(node_master_wallet)
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(32, len(hd_chain))
        hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
        assert_equal(2, hd_chain_version)
        assert_equal(0, internal_counter)
        seed_id = bytearray(seed_id)
        seed_id.reverse()
        assert_equal(seed_id.hex(), prev_seed_id)
        # Next change address is the same keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/2\'', info['hdkeypath'])
        # Next change address is the new keypool
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
        # External addresses use the same keypool
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert_equal(prev_seed_id, info['hdseedid'])
        assert_equal('m/0\'/0\'/3\'', info['hdkeypath'])
        self.log.info('Upgrade non-HD to HD chain split')
        copy_non_hd()
        self.test_upgradewallet(wallet, previous_version=60000, requested_version=169900)
        # Check that the hdchain updated correctly
        new_kvs = dump_bdb_kv(node_master_wallet)
        hd_chain = new_kvs[b'\x07hdchain']
        assert_equal(32, len(hd_chain))
        hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
        assert_equal(2, hd_chain_version)
        assert_equal(2, internal_counter)
        # Drain the keypool by fetching one external key and one change key. Should still be the same keypool
        info = wallet.getaddressinfo(wallet.getnewaddress())
        assert 'hdseedid' not in info
        assert 'hdkeypath' not in info
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert 'hdseedid' not in info
        assert 'hdkeypath' not in info
        # The next addresses are HD and should be on different HD chains
        info = wallet.getaddressinfo(wallet.getnewaddress())
        ext_id = info['hdseedid']
        assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
        info = wallet.getaddressinfo(wallet.getrawchangeaddress())
        assert_equal(ext_id, info['hdseedid'])
        assert_equal('m/0\'/1\'/0\'', info['hdkeypath'])
        self.log.info('KeyMetadata should upgrade when loading into master')
        copy_v16()
        old_kvs = dump_bdb_kv(v16_3_wallet)
        new_kvs = dump_bdb_kv(node_master_wallet)
        for k, old_v in old_kvs.items():
            if k.startswith(b'\x07keymeta'):
                new_ver, new_create_time, new_kp_str, new_seed_id, new_fpr, new_path_len, new_path, new_has_key_orig = deser_keymeta(BytesIO(new_kvs[k]))
                old_ver, old_create_time, old_kp_str, old_seed_id, old_fpr, old_path_len, old_path, old_has_key_orig = deser_keymeta(BytesIO(old_v))
                assert_equal(10, old_ver)
                if old_kp_str == b"":  # imported things that don't have keymeta (i.e. imported coinbase privkeys) won't be upgraded
                    assert_equal(new_kvs[k], old_v)
                    continue
                assert_equal(12, new_ver)
                assert_equal(new_create_time, old_create_time)
                assert_equal(new_kp_str, old_kp_str)
                assert_equal(new_seed_id, old_seed_id)
                assert_equal(0, old_path_len)
                assert_equal(new_path_len, len(new_path))
                assert_equal([], old_path)
                assert_equal(False, old_has_key_orig)
                assert_equal(True, new_has_key_orig)
                # Check that the path is right
                built_path = []
                for s in new_kp_str.decode().split('/')[1:]:
                    h = 0
                    if s[-1] == '\'':
                        s = s[:-1]
                        h = 0x80000000
                    p = int(s) | h
                    built_path.append(p)
                assert_equal(new_path, built_path)
        self.log.info('Upgrading to NO_DEFAULT_KEY should not remove the defaultkey')
        copy_split_hd()
        # Check the wallet has a default key initially
        old_kvs = dump_bdb_kv(node_master_wallet)
        defaultkey = old_kvs[b'\x0adefaultkey']
        self.log.info("Upgrade the wallet. Should still have the same default key.")
        self.test_upgradewallet(wallet, previous_version=139900, requested_version=159900)
        new_kvs = dump_bdb_kv(node_master_wallet)
        up_defaultkey = new_kvs[b'\x0adefaultkey']
        assert_equal(defaultkey, up_defaultkey)
        # 0.16.3 doesn't have a default key
        v16_3_kvs = dump_bdb_kv(v16_3_wallet)
        assert b'\x0adefaultkey' not in v16_3_kvs
if __name__ == '__main__':
    # Entry point when run directly by the functional test runner.
    UpgradeWalletTest().main()
|
mit
|
r-owen/stui
|
TUI/Scripts/Observations/Fiducial Monitor.py
|
1
|
5425
|
# fiducialMon.py
import time
import RO.Wdg
import Tkinter
import TUI.Models
import TUI.PlaySound
class ScriptClass(object):
def __init__(self, sr):
# if True, run in debug-only mode (which doesn't DO anything)
# if False, real time run
sr.debug = False
self.name="fiducialMon"
sr.master.winfo_toplevel().wm_resizable(True, True)
height=4; width=40
#log1
self.logWdg1 = RO.Wdg.LogWdg(master=sr.master, width=width, height =height,
helpText = "AZ fiducial window",)
self.logWdg1.grid(row=0, column=0, sticky="news")
# log2
self.logWdg2 = RO.Wdg.LogWdg(master = sr.master, width = width, height = height,
helpText = "Alt fiducial window", relief = "sunken", bd = 2,)
self.logWdg2.grid(row=1, column=0, sticky="news")
# log3
self.logWdg3 = RO.Wdg.LogWdg(master = sr.master, width = width, height = height,
helpText = "Rot fiducial window", relief = "sunken", bd = 2)
self.logWdg3.grid(row=2, column=0, sticky="news")
sr.master.rowconfigure(0, weight=1)
sr.master.rowconfigure(1, weight=1)
sr.master.rowconfigure(2, weight=1)
sr.master.columnconfigure(0, weight=1)
self.mcpModel = TUI.Models.getModel("mcp")
azFid0=self.crossFidFun(sr,self.mcpModel.azFiducialCrossing)
altFid0=self.crossFidFun(sr,self.mcpModel.altFiducialCrossing)
rotFid0=self.crossFidFun(sr,self.mcpModel.rotFiducialCrossing)
self.azMax=sr.getKeyVar(self.mcpModel.msOnMaxCorrection, ind=0, defVal=600)
self.altMax=sr.getKeyVar(self.mcpModel.msOnMaxCorrection, ind=1, defVal=600)
self.rotMax=sr.getKeyVar(self.mcpModel.msOnMaxCorrection, ind=2, defVal=600)
# self.azMax=2; self.altMax=2; self.rotMax=20
self.largeMes=" "*20+"error is too large to be corrected"
self.blueWarn=RO.Constants.sevWarning
self.logWdg1.addMsg("xx:xx:xx Az: %s " % (self.fidSS(azFid0)))
if abs(azFid0[3])>self.azMax :
self.logWdg1.addMsg("%s ( > %s)" % (self.largeMes,str(self.azMax)),severity=self.blueWarn)
self.warning()
self.logWdg2.addMsg("xx:xx:xx Alt: %s " % (self.fidSS(altFid0)))
if abs(altFid0[3])>self.altMax :
self.logWdg2.addMsg("%s ( > %s)" % (self.largeMes,str(self.altMax)),severity=self.blueWarn)
self.warning()
self.logWdg3.addMsg("xx:xx:xx Rot: %s " % (self.fidSS(rotFid0)))
if abs(rotFid0[3])>self.rotMax :
self.logWdg3.addMsg("%s ( > %s)" % (self.largeMes,str(self.rotMax)),severity=self.blueWarn)
self.warning()
self.mcpModel.azFiducialCrossing.addCallback(self.updateAz, callNow=False)
self.mcpModel.altFiducialCrossing.addCallback(self.updateAlt, callNow=False)
self.mcpModel.rotFiducialCrossing.addCallback(self.updateRot, callNow=False)
def getTAITimeStr(self):
    """Return the current TAI time as an "HH:MM:SS" string.

    TAI = UTC - (UTC - TAI).  RO.Astro.Tm.getUTCMinusTAI() returns the
    (negative) UTC-TAI offset in seconds, so it must be subtracted once.
    The original code used "- -" (a double negative), which *added* the
    offset and produced a timestamp on the wrong side of UTC.
    """
    return time.strftime(
        "%H:%M:%S",
        time.gmtime(time.time() - RO.Astro.Tm.getUTCMinusTAI()))
def fidSS(self, fid):
    """Format a fiducial-crossing tuple for display.

    fid holds (index, position in degrees, <unused here>, error); only
    fields 0, 1 and 3 appear in the output.
    """
    index, degrees, error = fid[0], fid[1], fid[3]
    return "ind=%i, deg=%5.1f, err= %i" % (index, degrees, error)
def crossFidFun(self, sr, keyVar):
    """Return the first four fields of a fiducial-crossing keyword variable.

    Any field with no current value defaults to 0.
    """
    return [sr.getKeyVar(keyVar, ind=i, defVal=0) for i in range(4)]
def updateAz(self, keyVar):
    """Log an azimuth fiducial crossing; alert when its error exceeds the limit.

    keyVar holds (index, position deg, ?, error); non-genuine (cached)
    values are ignored.  The unused local "ss" assignments from the
    original were dead code and have been removed.
    """
    if not keyVar.isGenuine:
        return
    timeStr = self.getTAITimeStr()
    self.logWdg1.addMsg("%s Az: %s " % (timeStr, self.fidSS(keyVar)))
    if abs(keyVar[3]) > self.azMax:
        # Error is beyond what the MCP will correct -- highlight and sound alert.
        self.logWdg1.addMsg("%s ( > %s)" % (self.largeMes, str(self.azMax)),
                            severity=self.blueWarn)
        self.warning()
def updateAlt(self, keyVar):
    """Log an altitude fiducial crossing; alert when its error exceeds the limit.

    keyVar holds (index, position deg, ?, error); non-genuine (cached)
    values are ignored.  The unused local "ss" assignments from the
    original were dead code and have been removed.
    """
    if not keyVar.isGenuine:
        return
    timeStr = self.getTAITimeStr()
    self.logWdg2.addMsg("%s Alt: %s " % (timeStr, self.fidSS(keyVar)))
    if abs(keyVar[3]) > self.altMax:
        # Error is beyond what the MCP will correct -- highlight and sound alert.
        self.logWdg2.addMsg("%s ( > %s)" % (self.largeMes, str(self.altMax)),
                            severity=self.blueWarn)
        self.warning()
def updateRot(self, keyVar):
    """Log a rotator fiducial crossing; alert when its error exceeds the limit.

    keyVar holds (index, position deg, ?, error); non-genuine (cached)
    values are ignored.  The unused local "ss" assignments from the
    original were dead code and have been removed.
    """
    if not keyVar.isGenuine:
        return
    timeStr = self.getTAITimeStr()
    self.logWdg3.addMsg("%s Rot: %s " % (timeStr, self.fidSS(keyVar)))
    if abs(keyVar[3]) > self.rotMax:
        # Error is beyond what the MCP will correct -- highlight and sound alert.
        self.logWdg3.addMsg("%s ( > %s)" % (self.largeMes, str(self.rotMax)),
                            severity=self.blueWarn)
        self.warning()
def warning(self,):
    """Play the audible alert used when a fiducial error exceeds its limit."""
    TUI.PlaySound.fiducialCrossing()
def run(self, sr):
    """Script-runner entry point; all work is done by the keyword callbacks."""
    pass
def end(self, sr):
    """Script-runner cleanup hook.

    Currently a no-op: the fiducial callbacks stay registered so the log
    windows keep updating after the script ends.  The commented-out lines
    below show how to detach them if that is ever wanted.
    """
    pass
    # self.mcpModel.azFiducialCrossing.removeCallback(self.updateAz)
    # self.mcpModel.altFiducialCrossing.removeCallback(self.updateAlt)
    # self.mcpModel.rotFiducialCrossing.removeCallback(self.updateRot)
    # self.logWdg1.addMsg(" stopped")
    # self.logWdg2.addMsg(" stopped")
    # self.logWdg3.addMsg(" stopped")
|
bsd-3-clause
|
revanthkolli/osf.io
|
tests/test_addons_oauth.py
|
21
|
8289
|
from nose.tools import *
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website import settings
from website.addons.base import AddonConfig
from website.addons.base import AddonOAuthNodeSettingsBase
from website.addons.base import AddonOAuthUserSettingsBase
from website.oauth.models import ExternalProvider
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory
from tests.factories import ExternalAccountFactory
from tests.factories import MockOAuth2Provider
from tests.factories import ProjectFactory
class MockNodeSettings(AddonOAuthNodeSettingsBase):
    """Minimal node-settings addon wired to the mock OAuth2 provider for tests."""
    oauth_provider = MockOAuth2Provider
class MockUserSettings(AddonOAuthUserSettingsBase):
    """Minimal user-settings addon wired to the mock OAuth2 provider for tests."""
    oauth_provider = MockOAuth2Provider
class TestNodeSettings(OsfTestCase):
    """Tests for AddonOAuthNodeSettingsBase behavior via the mock provider."""

    # Registers the mock addon with the test app for the duration of the class.
    ADDONS_UNDER_TEST = {
        MockOAuth2Provider.short_name: {
            'user_settings': MockUserSettings,
            'node_settings': MockNodeSettings,
        }
    }

    @classmethod
    def setUpClass(cls):
        super(TestNodeSettings, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestNodeSettings, cls).tearDownClass()

    def setUp(self):
        """Create a project with node/user settings and one external account."""
        super(TestNodeSettings, self).setUp()
        self.project = ProjectFactory()
        self.user = self.project.creator
        self.node_settings = self.project.get_or_add_addon(
            MockNodeSettings.oauth_provider.short_name,
            auth=Auth(user=self.user)
        )
        self.user_settings = self.user.get_or_add_addon(
            MockUserSettings.oauth_provider.short_name
        )
        self.external_account = ExternalAccountFactory()
        self.user.external_accounts.append(self.external_account)
        self.user.save()

    def test_has_auth_false(self):
        # No external account linked -> not authorized.
        assert_false(self.node_settings.has_auth)

    def test_has_auth_no_grant(self):
        # Account and user settings attached, but no oauth grant recorded.
        self.node_settings.external_account = self.external_account
        self.node_settings.user_settings = self.user_settings
        assert_false(self.node_settings.has_auth)

    def test_has_auth(self):
        # set_auth records the grant, so has_auth becomes True.
        self.node_settings.set_auth(
            external_account=self.external_account,
            user=self.user
        )
        assert_true(self.node_settings.has_auth)

    def test_set_auth(self):
        # set_auth wires account + user settings and records a grant
        # keyed by the project id.
        self.node_settings.set_auth(
            external_account=self.external_account,
            user=self.user
        )
        assert_equal(
            self.node_settings.external_account,
            self.external_account
        )
        assert_equal(
            self.node_settings.user_settings,
            self.user_settings
        )
        assert_in(
            self.project._id,
            self.user_settings.oauth_grants.keys()
        )

    def test_revoke_auth(self):
        # Revoking the account leaves an empty grant dict for the project.
        self.node_settings.set_auth(
            external_account=self.external_account,
            user=self.user
        )
        self.user_settings.revoke_oauth_access(self.external_account)
        assert_equal(
            self.user_settings.oauth_grants,
            {self.project._id: {}}
        )

    def test_clear_auth(self):
        # clear_auth detaches both the account and the user settings.
        self.node_settings.external_account = self.external_account
        self.node_settings.user_settings = self.user_settings
        self.node_settings.clear_auth()
        assert_is_none(self.node_settings.external_account)
        assert_is_none(self.node_settings.user_settings)
class TestUserSettings(OsfTestCase):
    """Tests for AddonOAuthUserSettingsBase behavior via the mock provider."""

    # Registers the mock addon with the test app for the duration of the class.
    ADDONS_UNDER_TEST = {
        MockOAuth2Provider.short_name: {
            'user_settings': MockUserSettings,
            'node_settings': MockNodeSettings,
        }
    }

    @classmethod
    def setUpClass(cls):
        super(TestUserSettings, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestUserSettings, cls).tearDownClass()

    def setUp(self):
        """Create a user with the addon, one external account, and a project."""
        super(TestUserSettings, self).setUp()
        self.user = AuthUserFactory()
        self.user_settings = self.user.get_or_add_addon(
            MockUserSettings.oauth_provider.short_name
        )
        self.external_account = ExternalAccountFactory()
        self.user.external_accounts.append(self.external_account)
        self.user.save()
        self.project = ProjectFactory(creator=self.user)

    def tearDown(self):
        super(TestUserSettings, self).tearDown()

    def test_connected_accounts_empty(self):
        # A user with no linked accounts exposes an empty list.
        self.user.external_accounts = []
        assert_equal(
            self.user_settings.external_accounts,
            []
        )

    def test_connected_accounts(self):
        assert_equal(
            self.user_settings.external_accounts,
            [self.external_account]
        )

    def test_verify_false_no_grants(self):
        # No grant recorded for this node -> access is not verified.
        assert_false(
            self.user_settings.verify_oauth_access(
                external_account=self.external_account,
                node=self.project
            )
        )

    def test_verify_false_with_grants(self):
        # A grant on a *different* node does not authorize this one.
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=ProjectFactory()
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                external_account=self.external_account,
                node=self.project
            )
        )

    def test_verify_false_metadata(self):
        # Metadata must match the grant for verification to pass.
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=self.project,
            metadata={'foo': 'bar'}
        )
        assert_false(
            self.user_settings.verify_oauth_access(
                external_account=self.external_account,
                node=self.project,
                metadata={'baz': 'qiz'}
            )
        )

    def test_verify_true(self):
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=self.project
        )
        assert_true(
            self.user_settings.verify_oauth_access(
                external_account=self.external_account,
                node=self.project
            )
        )

    def test_verify_true_with_metadata(self):
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=self.project,
            metadata={'foo': 'bar'}
        )
        assert_true(
            self.user_settings.verify_oauth_access(
                external_account=self.external_account,
                node=self.project,
                metadata={'foo': 'bar'}
            )
        )

    def test_grant(self):
        # Grants are stored as {node_id: {account_id: metadata}}.
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=self.project
        )
        assert_equal(
            self.user_settings.oauth_grants,
            {
                self.project._id: {
                    self.external_account._id: {}
                }
            }
        )

    def test_grant_not_owned(self):
        # Granting an account the user does not own raises and leaves
        # the grants mapping untouched.
        self.user.external_accounts = []
        with assert_raises(PermissionsError):
            self.user_settings.grant_oauth_access(
                external_account=self.external_account,
                node=self.project
            )
        assert_equal(
            self.user_settings.oauth_grants,
            {}
        )

    def test_grant_metadata(self):
        self.user_settings.grant_oauth_access(
            external_account=self.external_account,
            node=self.project,
            metadata={'foo': 'bar'}
        )
        assert_equal(
            self.user_settings.oauth_grants,
            {
                self.project._id: {
                    self.external_account._id: {'foo': 'bar'}
                }
            }
        )

    def test_on_delete(self):
        # Deleting the user addon clears auth from dependent node settings.
        node_settings = self.project.get_or_add_addon(
            MockUserSettings.oauth_provider.short_name,
            auth=Auth(user=self.user)
        )
        node_settings.set_auth(
            external_account=self.external_account,
            user=self.user
        )
        self.user.delete_addon(
            MockUserSettings.oauth_provider.short_name
        )
        node_settings.reload()
        assert_is_none(node_settings.external_account)
        assert_is_none(node_settings.user_settings)
|
apache-2.0
|
makelove/OpenCV-Python-Tutorial
|
cv-Tkinter-GUI/Displaying-video-feed-with-OpenCV-and-Tkinter.py
|
1
|
1115
|
# -*- coding: utf-8 -*-
# @Time    : 2018/2/8 16:09
# @Author  : play4fun
# @File    : Displaying a video feed with OpenCV and Tkinter.py
# @Software: PyCharm

"""
Displaying a video feed with OpenCV and Tkinter.py:
https://www.pyimagesearch.com/2016/05/30/displaying-a-video-feed-with-opencv-and-tkinter/

Launcher script: parses the CLI arguments, starts the camera stream and
hands it to the PhotoBoothApp Tkinter GUI.
"""

# import the necessary packages
from __future__ import print_function
from photoboothapp import PhotoBoothApp
from imutils.video import VideoStream
import argparse
import time

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,
                help="path to output directory to store snapshots")
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# initialize the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)  # camera warm-up delay

# start the app (blocks in the Tk main loop until the window closes)
pba = PhotoBoothApp(vs, args["output"])
pba.root.mainloop()
|
mit
|
guileschool/BEAGLEBONE-tutorials
|
BBB-firmware/u-boot-v2015.10-rc2/tools/patman/checkpatch.py
|
25
|
6127
|
# Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import collections
import command
import gitutil
import os
import re
import sys
import terminal
def FindCheckPatch():
    """Locate the checkpatch.pl script.

    Searches a fixed list of likely directories first, then walks upwards
    from the last candidate looking for a Chrome OS source tree.  Exits
    the program if the script cannot be found.
    """
    top_level = gitutil.GetTopLevel()
    candidates = [
        os.getcwd(),
        os.path.join(os.getcwd(), '..', '..'),
        os.path.join(top_level, 'tools'),
        os.path.join(top_level, 'scripts'),
        '%s/bin' % os.getenv('HOME'),
    ]
    # Check each candidate directory directly.
    for search_dir in candidates:
        fname = os.path.join(search_dir, 'checkpatch.pl')
        if os.path.isfile(fname):
            return fname
    # Walk upwards (from the last candidate) looking for a Chrome OS tree.
    while not os.path.ismount(search_dir):
        fname = os.path.join(search_dir, 'src', 'third_party', 'kernel',
                             'files', 'scripts', 'checkpatch.pl')
        if os.path.isfile(fname):
            return fname
        search_dir = os.path.dirname(search_dir)
    sys.exit('Cannot find checkpatch.pl - please put it in your ' +
             '~/bin directory or use --no-check')
def CheckPatch(fname, verbose=False):
    """Run checkpatch.pl on a file.

    Args:
        fname: Filename of the patch to check
        verbose: True to echo each checkpatch output line as it is parsed

    Returns:
        namedtuple containing:
            ok: False=failure, True=ok
            problems: List of problems, each a dict:
                'type'; error or warning
                'msg': text message
                'file' : filename
                'line': line number
            errors: Number of errors
            warnings: Number of warnings
            checks: Number of checks
            lines: Number of lines
            stdout: Full output of checkpatch
    """
    fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
              'stdout']
    result = collections.namedtuple('CheckPatchResult', fields)
    result.ok = False
    # Bug fix: this previously assigned "result.warning" (no trailing "s"),
    # leaving result.warnings undefined whenever no summary line matched.
    result.errors, result.warnings, result.checks = 0, 0, 0
    result.lines = 0
    result.problems = []
    chk = FindCheckPatch()
    item = {}
    result.stdout = command.Output(chk, '--no-tree', fname)
    #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    #stdout, stderr = pipe.communicate()

    # Expected summary formats:
    # total: 0 errors, 0 warnings, 159 lines checked
    # or:
    # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
    # Raw strings so "\d" is a regex escape, not a string escape.
    re_stats = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)')
    re_stats_full = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)'
                               r' checks, (\d+)')
    re_ok = re.compile('.*has no obvious style problems')
    re_bad = re.compile('.*has style problems, please review')
    re_error = re.compile('ERROR: (.*)')
    re_warning = re.compile('WARNING: (.*)')
    re_check = re.compile('CHECK: (.*)')
    re_file = re.compile(r'#\d+: FILE: ([^:]*):(\d+):')

    for line in result.stdout.splitlines():
        if verbose:
            print(line)

        # A blank line indicates the end of a message
        if not line and item:
            result.problems.append(item)
            item = {}

        # Try the full (with checks) summary format first.
        match = re_stats_full.match(line)
        if not match:
            match = re_stats.match(line)
        if match:
            result.errors = int(match.group(1))
            result.warnings = int(match.group(2))
            if len(match.groups()) == 4:
                result.checks = int(match.group(3))
                result.lines = int(match.group(4))
            else:
                result.lines = int(match.group(3))
        elif re_ok.match(line):
            result.ok = True
        elif re_bad.match(line):
            result.ok = False

        # Accumulate the pieces of the current problem into 'item'.
        err_match = re_error.match(line)
        warn_match = re_warning.match(line)
        file_match = re_file.match(line)
        check_match = re_check.match(line)
        if err_match:
            item['msg'] = err_match.group(1)
            item['type'] = 'error'
        elif warn_match:
            item['msg'] = warn_match.group(1)
            item['type'] = 'warning'
        elif check_match:
            item['msg'] = check_match.group(1)
            item['type'] = 'check'
        elif file_match:
            item['file'] = file_match.group(1)
            item['line'] = int(file_match.group(2))

    return result
def GetWarningMsg(col, msg_type, fname, line, msg):
    '''Create a message for a given file/line

    Args:
        col: terminal.Color object used to colorize the message type
        msg_type: Message type ('error', 'warning' or 'check')
        fname: Filename which reports the problem
        line: Line number where it was noticed
        msg: Message to report
    '''
    # Map known message types to their display color; unknown types pass
    # through uncolored (col is never touched for them).
    color_names = {'warning': 'YELLOW', 'error': 'RED', 'check': 'MAGENTA'}
    color_name = color_names.get(msg_type)
    if color_name is not None:
        msg_type = col.Color(getattr(col, color_name), msg_type)
    return '%s: %s,%d: %s' % (msg_type, fname, line, msg)
def CheckPatches(verbose, args):
    '''Run the checkpatch.pl script on each patch

    Args:
        verbose: True to echo raw checkpatch output while parsing
        args: List of patch filenames to check

    Returns:
        True if no errors, warnings or checks were found, else False.

    Notes on changes: the local variable previously named "str" shadowed
    the builtin and has been renamed; print statements were converted to
    the single-argument print(...) form, which behaves identically in
    Python 2 and also parses under Python 3.
    '''
    error_count, warning_count, check_count = 0, 0, 0
    col = terminal.Color()

    for fname in args:
        result = CheckPatch(fname, verbose)
        if not result.ok:
            error_count += result.errors
            warning_count += result.warnings
            check_count += result.checks
            print('%d errors, %d warnings, %d checks for %s:' % (result.errors,
                    result.warnings, result.checks, col.Color(col.BLUE, fname)))
            if (len(result.problems) != result.errors + result.warnings +
                    result.checks):
                # Sanity check: every counted problem should have been parsed.
                print("Internal error: some problems lost")
            for item in result.problems:
                print(GetWarningMsg(col, item.get('type', '<unknown>'),
                        item.get('file', '<unknown>'),
                        item.get('line', 0), item.get('msg', 'message')))
            print('')
            #print stdout
    if error_count or warning_count or check_count:
        message = 'checkpatch.pl found %d error(s), %d warning(s), %d checks(s)'
        color = col.GREEN
        if warning_count:
            color = col.YELLOW
        if error_count:
            color = col.RED
        print(col.Color(color, message % (error_count, warning_count,
                                          check_count)))
        return False
    return True
|
mit
|
dodger487/MIST
|
data/magnetak_detectors.py
|
1
|
14236
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Chris Riederer
# Google, Inc
# 2014-08-26
"""Contains the detectors used in MagnetAK"""
import numpy as np
class Detector(object):
    """Abstract base class of magnet-button-press detectors"""

    def detect(self, runData):
        """This method should take in a runData dictionary and output times"""
        raise NotImplementedError("Please implement this method")

    def setParameters(self, args):
        """This method should take in a list of parameters and set them appropriately"""
        raise NotImplementedError("Please implement this method")

    def evaluateCost(self, runData, isPositive):
        """This method should return a cost for how far away the detector was from being correct"""
        raise NotImplementedError("Please implement this method")
class OriginalDetector(Detector):
    """The original detector for paperscope.

    Slides a two-segment window over the magnetometer samples; fires when
    the first segment's minimum offset from the baseline is below T1 and
    the second segment's maximum offset is above T2.

    NOTE(review): detect() uses segment_size = 20 but evaluateCost() uses
    segment_size = 10, so the cost is evaluated on a different window than
    detection -- confirm this asymmetry is intentional.
    """

    def __init__(self):
        # Thresholds on the offset norms (alternative tunings kept below).
        self.T1 = 30
        self.T2 = 130
        # self.T1 = 90 # trying to make difficult false positives
        # self.T2 = 90
        # self.T1 = 31.5 # found by optimization
        # self.T2 = 123.5

    def setParameters(self, args):
        """Set [T1, T2] from an optimizer-style parameter list."""
        self.T1 = args[0]
        self.T2 = args[1]

    def detect(self, runData):
        """Return the sensor timestamps at which a button press is detected.

        runData["magnetometer"] rows are assumed to be
        [time, ?, x, y, z] -- column 0 is used as the reported timestamp
        and columns 2:5 as the field vector (TODO confirm with data source).
        """
        segment_size = 20
        # segment_size = 10
        window_size = segment_size * 2
        history = []
        detections = []
        data = np.array(runData["magnetometer"])
        # Drop rows whose field vector is all zeros (sensor dropouts).
        data = data[data[:, 2:].any(1)]
        lastFiring = 0 # keep track of last time button was pulled
        for window in np.arange(len(data) - window_size):
            window_end = window + window_size
            if window_end - lastFiring < window_size: # wait for a full window before looking again
                continue
            # For each window, calculate the baseline.
            # Get the baseline S0, the last value before we start the segmentation.
            S0 = data[window_end, 2:5]
            # Also, split the whole window into segments.
            # TODO: Calculate the segment size in samples based on time.
            segments = np.arange(window, window_end, segment_size)
            # A place for the means and maximums.
            means = []
            maximums = []
            minimums = []
            for segment in segments:
                # Calculate the offset for each of the samples in the segment.
                samples = data[segment:segment + segment_size, 2:5]
                offsets = samples - S0
                norms = [np.linalg.norm(row) for row in offsets]
                # Calculate the metrics for each segment.
                means.append(np.mean(norms))
                maximums.append(np.max(norms))
                minimums.append(np.min(norms))
            # Apply the thresholds to the computed statistics.
            min_1 = minimums[0]
            max_2 = maximums[1]
            # Store I_1, M_2 and I_3 for each window.
            #history.append([window_end, U1, M2, U3, np.linalg.norm(S0)])
            history.append([window_end, min_1, max_2, np.linalg.norm(S0), np.linalg.norm(S0)])
            # print [window_end, min_1, max_2, np.linalg.norm(S0), np.linalg.norm(S0)]
            #print 'Window %d: I_1=%f, M_2=%f, I_3=%f' % (window, U1, M2, U3)
            #if U1 < MAX_U1 and U3 < MAX_U3 and M2 > MIN_M2:
            # print min_1, max_2
            if min_1 < self.T1 and max_2 > self.T2:
                detections.append(data[window_end, 0])
                lastFiring = window_end
        return detections

    def evaluateCost(self, runData, isPositive):
        """Return how far the run was from (not) firing.

        For positive runs the cost is the smallest per-window violation of
        the firing condition; for negatives, the largest.
        """
        segment_size = 10
        window_size = segment_size * 2
        history = []
        detections = []
        data = np.array(runData["magnetometer"])
        data = data[data[:, 2:].any(1)]
        lastFiring = 0 # keep track of last time button was pulled
        for window in np.arange(len(data) - window_size):
            window_end = window + window_size
            if window_end - lastFiring < window_size: # wait for a full window before looking again
                continue
            # For each window, calculate the baseline.
            # Get the baseline S0, the last value before we start the segmentation.
            S0 = data[window_end, 2:5]
            # Also, split the whole window into segments.
            # TODO: Calculate the segment size in samples based on time.
            segments = np.arange(window, window_end, segment_size)
            # A place for the means and maximums.
            means = []
            maximums = []
            minimums = []
            for segment in segments:
                # Calculate the offset for each of the samples in the segment.
                samples = data[segment:segment + segment_size, 2:5]
                offsets = samples - S0
                norms = [np.linalg.norm(row) for row in offsets]
                # Calculate the metrics for each segment.
                means.append(np.mean(norms))
                maximums.append(np.max(norms))
                minimums.append(np.min(norms))
            # Apply the thresholds to the computed statistics.
            min_1 = minimums[0]
            max_2 = maximums[1]
            # Store cost for each window. Cost=0 if it fires and is positive.
            if isPositive:
                cost = max(0, min_1 - self.T1) + max(0, self.T2 - max_2)
            else:
                cost = max(0, self.T1 - min_1) + max(0, max_2 - self.T2)
            history.append(cost)
        return min(history) if isPositive else max(history)
class TimeWindowDetector(Detector):
    """The original detector for paperscope modified to use time instead of samples.

    Windows are defined in wall-clock time: timestamps appear to be in
    nanoseconds (millisecond parameters are scaled by 1e6) -- confirm
    with the data source.
    """

    def __init__(self):
        # Thresholds on offset norms and segment length.
        self.T1 = 30
        self.T2 = 130
        self.segment_time = 200 # ms
        self.waitTime = 350 # ms, refractory period after a detection
        self.args = [self.T1, self.T2, self.segment_time]

    def setParameters(self, args):
        """Set [T1, T2, segment_time] from an optimizer-style parameter list."""
        self.T1 = args[0]
        self.T2 = args[1]
        self.segment_time = args[2]

    def detect(self, runData):
        """Return sensor timestamps at which a button press is detected."""
        segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
        window_size = segment_time_ns * 2
        waitTime = self.waitTime * 1e6
        history = []
        detections = []
        data = np.array(runData["magnetometer"])
        # Drop rows whose field vector is all zeros (sensor dropouts).
        data = data[data[:, 2:].any(1)]
        domain = data[:,0] # times
        lastFiring = 0 # keep track of last time button was pulled
        for sensorTime in domain[domain > domain[0]+window_size]:
            # Two adjacent time segments ending at sensorTime.
            segment1 = data[(domain > sensorTime - window_size) & (domain <= sensorTime - segment_time_ns)]
            segment2 = data[(domain > sensorTime - segment_time_ns) & (domain <= sensorTime)]
            window = data[(domain > sensorTime - window_size) & (domain <= sensorTime)]
            # wait to fire if we don't have any sensor events
            if len(segment1) == 0 or len(segment2) == 0:
                continue
            if sensorTime - lastFiring < waitTime: # wait for a full window before looking again
                continue
            # For each window, calculate the baseline.
            # Get the baseline S0, the last value before we start the segmentation.
            S0 = segment2[-1, 2:5]
            # A place for the means and maximums.
            means = []
            maximums = []
            minimums = []
            for segment in [segment1, segment2]:
                # Calculate the offset for each of the samples in the segment.
                samples = segment[:, 2:5]
                offsets = samples - S0
                norms = [np.linalg.norm(row) for row in offsets]
                # Calculate the metrics for each segment.
                # print segment
                # print offsets
                # print norms
                means.append(np.mean(norms))
                maximums.append(np.max(norms))
                minimums.append(np.min(norms))
            # Apply the thresholds to the computed statistics.
            min_1 = minimums[0]
            max_2 = maximums[1]
            if min_1 < self.T1 and max_2 > self.T2:
                detections.append(sensorTime)
                lastFiring = sensorTime
        return detections

    def evaluateCost(self, runData, isPositive):
        """Return how far the run was from (not) firing, per the T1/T2 rule."""
        segment_time_ns = self.segment_time * 1e6 # convert to nanoseconds
        window_size = segment_time_ns * 2
        history = []
        detections = []
        data = np.array(runData["magnetometer"])
        data = data[data[:, 2:].any(1)]
        domain = data[:,0] # times
        lastFiring = 0 # keep track of last time button was pulled
        # print runData['filename']
        for sensorTime in domain[domain > domain[0]+window_size]:
            segment1 = data[(domain > sensorTime - window_size) & (domain <= sensorTime - segment_time_ns)]
            segment2 = data[(domain > sensorTime - segment_time_ns) & (domain <= sensorTime)]
            window = data[(domain > sensorTime - window_size) & (domain <= sensorTime)]
            # wait to fire if we don't have any sensor events
            if len(segment1) == 0 or len(segment2) == 0:
                continue
            # TODO: deal with extra firings
            if sensorTime - lastFiring < segment_time_ns: # wait for a full window before looking again
                continue
            # For each window, calculate the baseline.
            # Get the baseline S0, the last value before we start the segmentation.
            S0 = segment2[-1, 2:5]
            # A place for the means and maximums.
            means = []
            maximums = []
            minimums = []
            for segment in [segment1, segment2]:
                # Calculate the offset for each of the samples in the segment.
                samples = segment[:, 2:5]
                offsets = samples - S0
                norms = [np.linalg.norm(row) for row in offsets]
                # Calculate the metrics for each segment.
                means.append(np.mean(norms))
                maximums.append(np.max(norms))
                minimums.append(np.min(norms))
            # Apply the thresholds to the computed statistics.
            min_1 = minimums[0]
            max_2 = maximums[1]
            # Store cost for each window. Cost=0 if it fires and is positive.
            if isPositive:
                cost = max(0, min_1 - self.T1) + max(0, self.T2 - max_2)
            else:
                cost = max(0, self.T1 - min_1) + max(0, max_2 - self.T2)
            history.append(cost)
        return min(history) if isPositive else max(history)
class VectorChangeDetector(Detector):
    """Fires when the per-axis change of the field vector over a fixed time
    window crosses the (Xlo, Yhi, Zhi) thresholds.

    Timestamps appear to be nanoseconds (ms parameters scaled by 1e6) --
    confirm with the data source.
    """

    def __init__(self):
        self.window_size = 100 # window size in milliseconds
        # Per-axis thresholds on the change vector (alternative tunings below).
        self.Xlo = -3
        self.Yhi = 18
        self.Zhi = 6
        # self.window_size, self.Xlo, self.Yhi, self.Zhi = [-0.38549015, -2.03822928, 4.02000427, 2.27985856]
        # self.Xlo, self.Yhi, self.Zhi = [-0.23956289, -0.23956307 , 0.1800537 ]
        # self.args = [self.window_size, self.Xlo, self.Yhi, self.Zhi]
        self.args = [self.Xlo, self.Yhi, self.Zhi]

    def setParameters(self, args):
        """Set [Xlo, Yhi, Zhi] from an optimizer-style parameter list."""
        self.Xlo, self.Yhi, self.Zhi = args
        # self.window_size = abs(args[0])
        # self.Xlo = args[1]
        # self.Yhi = args[2]
        # self.Zhi = args[3]
        self.args = args

    def detectAndEvaluate(self, runData, isPositive=1):
        """Return (detection times, cost) computed in a single pass."""
        history = []
        window_size = self.window_size * 1e6 # convert to nanoseconds
        data = np.array(runData["magnetometer"])
        # Drop rows whose field vector is all zeros (sensor dropouts).
        data = data[data[:, 2:].any(1)]
        domain = data[:,0]
        X = data[:,2]
        Y = data[:,3]
        Z = data[:,4]
        detections = []
        lastTime = 0
        for index, timeStart in enumerate(domain[domain < domain[-1]-window_size]):
            # Index of the first sample at/after timeStart + window_size.
            nextIndex = len(domain[domain < timeStart + window_size])
            currentTime = domain[nextIndex]
            # 350 ms (in ns) refractory period after the last detection.
            if currentTime - lastTime < 350000000:
                continue
            oldValues = data[index, 2:5]
            currentValues = data[nextIndex, 2:5]
            difference = currentValues - oldValues
            # print difference
            if difference[0] < self.Xlo and difference[1] > self.Yhi and difference[2] > self.Zhi:
                t = domain[nextIndex]
                detections.append(t)
                lastTime = currentTime
            # Store cost for each window. Cost=0 if it fires and is positive.
            if isPositive:
                cost = max(0, difference[0] - self.Xlo) + max(0, self.Yhi - difference[1]) + max(0, self.Zhi - difference[2])
                # cost = cost**2
            else:
                cost = max(0, -difference[0] + self.Xlo) + max(0, -self.Yhi + difference[1]) + max(0, -self.Zhi + difference[2])
                # cost = cost**2
            history.append(cost)
        out_cost = min(history) if isPositive else max(history)
        return (detections, out_cost)

    def detect(self, runData):
        detections, out_cost = self.detectAndEvaluate(runData)
        return detections

    def evaluateCost(self, runData, isPositive):
        detections, out_cost = self.detectAndEvaluate(runData, isPositive)
        return out_cost
class VectorDistanceDetector(Detector):
"""TODO"""
def __init__(self):
self.window_size = 100 # window size in milliseconds
self.X = 0
self.Y = 0
self.Z = 0
self.threshold = 0.1
self.args = [self.X, self.Y, self.Z, self.threshold, self.window_size]
def setParameters(self, args):
self.X, self.Y, self.Z, self.threshold, self.window_size = args
self.args = args
def detectAndEvaluate(self, runData, isPositive=1):
history = []
window_size = self.window_size * 1e6 # convert to nanoseconds
data = np.array(runData["magnetometer"])
data = data[data[:, 2:].any(1)]
domain = data[:,0]
X = data[:,2]
Y = data[:,3]
Z = data[:,4]
detections = []
lastTime = 0
for index, timeStart in enumerate(domain[domain < domain[-1]-window_size]):
nextIndex = len(domain[domain < timeStart + window_size])
currentTime = domain[nextIndex]
if currentTime - lastTime < 350000000:
continue
oldValues = data[index, 2:5]
currentValues = data[nextIndex, 2:5]
difference = currentValues - oldValues
distance = np.sqrt(sum(difference**2))
# print difference
if distance < self.threshold:
t = domain[nextIndex]
detections.append(t)
lastTime = currentTime
# Store cost for each window. Cost=0 if it fires and is positive.
if isPositive:
cost = max(0, self.threshold - distance)
else:
cost = max(0, distance - self.threshold)
history.append(cost)
out_cost = min(history) if isPositive else max(history)
return (detections, out_cost)
def detect(self, runData):
detections, out_cost = self.detectAndEvaluate(runData)
return detections
def evaluateCost(self, runData, isPositive):
detections, out_cost = self.detectAndEvaluate(runData, isPositive)
return out_cost
|
apache-2.0
|
CodingCat/mxnet
|
example/dsd/mlp.py
|
52
|
5126
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import os
import logging
import argparse
from math import ceil
import sparse_sgd
# symbol net
def get_symbol():
    """Build the 784-128-64-10 MLP symbol used by the MNIST DSD example."""
    net = mx.symbol.Variable('data')
    net = mx.symbol.FullyConnected(net, name='fc1', num_hidden=128)
    net = mx.symbol.Activation(net, name='relu1', act_type="relu")
    net = mx.symbol.FullyConnected(net, name='fc2', num_hidden=64)
    net = mx.symbol.Activation(net, name='relu2', act_type="relu")
    net = mx.symbol.FullyConnected(net, name='fc3', num_hidden=10)
    return mx.symbol.SoftmaxOutput(net, name='sm')
# download ubyte version of mnist and untar
def download_data():
    """Fetch the ubyte MNIST archive into ./data/ and unpack it.

    Skips the download when all four ubyte files are already present.
    Side effects: creates data/, shells out to wget/unzip via os.system,
    and chdirs into ./data and back.
    """
    if not os.path.isdir("data/"):
        os.system("mkdir data/")
    if (not os.path.exists('data/train-images-idx3-ubyte')) or \
       (not os.path.exists('data/train-labels-idx1-ubyte')) or \
       (not os.path.exists('data/t10k-images-idx3-ubyte')) or \
       (not os.path.exists('data/t10k-labels-idx1-ubyte')):
        os.system("wget -q http://data.mxnet.io/mxnet/data/mnist.zip -P data/")
        os.chdir("./data")
        os.system("unzip -u mnist.zip")
        os.chdir("..")
# get data iterators
def get_iters(batch_size):
    """Return (train, val) MNIST iterators over the flat 784-pixel images.

    Both iterators shuffle; the train iterator uses a fixed seed for
    reproducibility.
    """
    train = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        data_shape=(784,),
        label_name='sm_label',
        batch_size=batch_size,
        shuffle=True,
        flat=True,
        silent=False,
        seed=10)
    val = mx.io.MNISTIter(
        image="data/t10k-images-idx3-ubyte",
        label="data/t10k-labels-idx1-ubyte",
        data_shape=(784,),
        label_name='sm_label',
        batch_size=batch_size,
        shuffle=True,
        flat=True,
        silent=False)
    return (train, val)
def test_mlp(args):
    """Train the MLP on MNIST with the sparse (DSD) SGD optimizer.

    args carries the sparsity schedule: pruning_switch_epoch plus either
    per-stage sparsity fractions or per-stage magnitude thresholds (the
    two modes are mutually exclusive; sparsity takes precedence when set).
    Checkpoint files written during training are removed afterwards.
    """
    # get parameters
    prefix = './mlp'
    batch_size = 100
    pruning_switch_epoch = [int(i) for i in args.pruning_switch_epoch.split(',')]
    # Total epochs = last stage boundary in the pruning schedule.
    num_epoch = pruning_switch_epoch[-1]
    batches_per_epoch = ceil(60000.0/batch_size)

    # weight and bias sparsity/threshold control (comma-separated per stage)
    weight_sparsity = args.weight_sparsity
    bias_sparsity = args.bias_sparsity
    weight_threshold = args.weight_threshold
    bias_threshold = args.bias_threshold
    if args.weight_sparsity:
        weight_sparsity = [float(i) for i in args.weight_sparsity.split(',')]
        bias_sparsity = [float(i) for i in args.bias_sparsity.split(',')]
    else:
        weight_threshold = [float(i) for i in args.weight_threshold.split(',')]
        bias_threshold = [float(i) for i in args.bias_threshold.split(',')]

    # get symbols and iterators
    sym = get_symbol()
    download_data()
    (train, val) = get_iters(batch_size)

    # fit model
    model = mx.mod.Module(
        sym,
        context=[mx.cpu(i) for i in range(2)],
        data_names=['data'],
        label_names=['sm_label'])
    optimizer_params = {
        'learning_rate' : 0.1,
        'wd' : 0.004,
        'momentum' : 0.9,
        'pruning_switch_epoch' : pruning_switch_epoch,
        'batches_per_epoch' : batches_per_epoch,
        'weight_sparsity' : weight_sparsity,
        'bias_sparsity' : bias_sparsity,
        'weight_threshold' : weight_threshold,
        'bias_threshold' : bias_threshold}
    logging.info('Start training...')
    model.fit(train,
              eval_data=val,
              eval_metric='acc',
              epoch_end_callback=mx.callback.do_checkpoint(prefix),
              num_epoch=num_epoch,
              optimizer='sparsesgd',
              optimizer_params=optimizer_params)
    logging.info('Finish traning...')

    # remove files written by the checkpoint callback
    for i in range(num_epoch):
        os.remove('%s-%04d.params' % (prefix, i + 1))
    os.remove('%s-symbol.json' % prefix)
if __name__ == "__main__":
    # print logging by default
    logging.basicConfig(level=logging.DEBUG)

    # CLI: pruning schedule plus either sparsity fractions or thresholds,
    # all comma-separated lists with one entry per pruning stage.
    parser = argparse.ArgumentParser(description="sparse training")
    parser.add_argument('--pruning_switch_epoch', type=str)
    parser.add_argument('--weight_sparsity', type=str, default=None)
    parser.add_argument('--bias_sparsity', type=str, default=None)
    parser.add_argument('--weight_threshold', type=str, default=None)
    parser.add_argument('--bias_threshold', type=str, default=None)
    args = parser.parse_args()

    test_mlp(args)
|
apache-2.0
|
ibab/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_test.py
|
8
|
11058
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class MultivariateNormalTest(tf.test.TestCase):
  """Unit tests for tf.contrib.distributions.MultivariateNormal.

  Each log_pdf/pdf/entropy test compares against scipy.stats
  (multivariate_normal) when SciPy is importable, and is otherwise skipped
  with a warning.  Shape tests cover scalar, batched, and multidimensional
  batches of mu/sigma.
  """

  def testNonmatchingMuSigmaFails(self):
    """mu/sigma with incompatible ranks or shapes must raise at op run time."""
    with tf.Session():
      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=[1.0, 2.0],
          sigma=[[[1.0, 0.0],
                  [0.0, 1.0]],
                 [[1.0, 0.0],
                  [0.0, 1.0]]])
      with self.assertRaisesOpError(
          r"Rank of mu should be one less than rank of sigma"):
        mvn.mean.eval()

      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=[[1.0], [2.0]],
          sigma=[[[1.0, 0.0],
                  [0.0, 1.0]],
                 [[1.0, 0.0],
                  [0.0, 1.0]]])
      with self.assertRaisesOpError(
          r"mu.shape and sigma.shape\[\:-1\] must match"):
        mvn.mean.eval()

  def testNotPositiveDefiniteSigmaFails(self):
    """Non-positive-definite sigma (or bad sigma_chol) must raise."""
    with tf.Session():
      # Singular (rank-1) covariance: Cholesky decomposition fails.
      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=[[1.0, 2.0], [1.0, 2.0]],
          sigma=[[[1.0, 0.0],
                  [0.0, 1.0]],
                 [[1.0, 1.0],
                  [1.0, 1.0]]])
      with self.assertRaisesOpError(
          r"LLT decomposition was not successful."):
        mvn.mean.eval()

      # Negative diagonal entry: not positive definite.
      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=[[1.0, 2.0], [1.0, 2.0]],
          sigma=[[[1.0, 0.0],
                  [0.0, 1.0]],
                 [[-1.0, 0.0],
                  [0.0, 1.0]]])
      with self.assertRaisesOpError(
          r"LLT decomposition was not successful."):
        mvn.mean.eval()

      # Cholesky factor with a negative diagonal entry is rejected directly.
      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=[[1.0, 2.0], [1.0, 2.0]],
          sigma_chol=[[[1.0, 0.0],
                       [0.0, 1.0]],
                      [[-1.0, 0.0],
                       [0.0, 1.0]]])
      with self.assertRaisesOpError(
          r"sigma_chol is not positive definite."):
        mvn.mean.eval()

  def testLogPDFScalar(self):
    """log_pdf/pdf of a single point matches scipy."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(mu_v)
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(sigma_v)
      x = np.array([-2.5, 2.5], dtype=np.float32)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_log_pdf = scipy_mvn.logpdf(x)
        expected_pdf = scipy_mvn.pdf(x)
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllClose(expected_pdf, pdf.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testLogPDFScalarSigmaHalf(self):
    """Constructing from sigma_chol reproduces sigma and matches scipy."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0, 1.0], dtype=np.float32)
      mu = tf.constant(mu_v)
      sigma_v = np.array([[1.0, 0.1, 0.2],
                          [0.1, 2.0, 0.05],
                          [0.2, 0.05, 3.0]], dtype=np.float32)
      sigma_chol_v = np.linalg.cholesky(sigma_v)
      sigma_chol = tf.constant(sigma_chol_v)
      x = np.array([-2.5, 2.5, 1.0], dtype=np.float32)
      mvn = tf.contrib.distributions.MultivariateNormal(
          mu=mu, sigma_chol=sigma_chol)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      # sigma should be reconstructed as sigma_chol @ sigma_chol^T.
      sigma = mvn.sigma

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_log_pdf = scipy_mvn.logpdf(x)
        expected_pdf = scipy_mvn.pdf(x)

        self.assertEqual(sigma.get_shape(), (3, 3))
        self.assertAllClose(sigma_v, sigma.eval())
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllClose(expected_pdf, pdf.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testLogPDF(self):
    """log_pdf/pdf of a batch of points against one distribution."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(mu_v)
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(sigma_v)
      x = np.array([[-2.5, 2.5], [4.0, 0.0], [-1.0, 2.0]], dtype=np.float32)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_log_pdf = scipy_mvn.logpdf(x)
        expected_pdf = scipy_mvn.pdf(x)
        self.assertEqual(log_pdf.get_shape(), (3,))
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllClose(expected_pdf, pdf.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testLogPDFMatchingDimension(self):
    """Batch of points with a matching batch of identical mu/sigma."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(np.vstack(3 * [mu_v]))
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(np.vstack(3 * [sigma_v[np.newaxis, :]]))
      x = np.array([[-2.5, 2.5], [4.0, 0.0], [-1.0, 2.0]], dtype=np.float32)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_log_pdf = scipy_mvn.logpdf(x)
        expected_pdf = scipy_mvn.pdf(x)
        self.assertEqual(log_pdf.get_shape(), (3,))
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllClose(expected_pdf, pdf.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testLogPDFMultidimensional(self):
    """A (3, 5) batch of distributions evaluated at one point."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(np.vstack(15 * [mu_v]).reshape(3, 5, 2))
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(
          np.vstack(15 * [sigma_v[np.newaxis, :]]).reshape(3, 5, 2, 2))
      x = np.array([-2.5, 2.5], dtype=np.float32)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      log_pdf = mvn.log_pdf(x)
      pdf = mvn.pdf(x)

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_log_pdf = np.vstack(15 * [scipy_mvn.logpdf(x)]).reshape(3, 5)
        expected_pdf = np.vstack(15 * [scipy_mvn.pdf(x)]).reshape(3, 5)
        self.assertEqual(log_pdf.get_shape(), (3, 5))
        self.assertAllClose(expected_log_pdf, log_pdf.eval())
        self.assertAllClose(expected_pdf, pdf.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testEntropy(self):
    """Scalar-batch entropy matches scipy."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(mu_v)
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(sigma_v)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      entropy = mvn.entropy()

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_entropy = scipy_mvn.entropy()
        self.assertEqual(entropy.get_shape(), ())
        self.assertAllClose(expected_entropy, entropy.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testEntropyMultidimensional(self):
    """(3, 5)-batched entropy matches scipy, broadcast per element."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(np.vstack(15 * [mu_v]).reshape(3, 5, 2))
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(
          np.vstack(15 * [sigma_v[np.newaxis, :]]).reshape(3, 5, 2, 2))
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      entropy = mvn.entropy()

      try:
        from scipy import stats  # pylint: disable=g-import-not-at-top
        scipy_mvn = stats.multivariate_normal(mean=mu_v, cov=sigma_v)

        expected_entropy = np.vstack(15 * [scipy_mvn.entropy()]).reshape(3, 5)
        self.assertEqual(entropy.get_shape(), (3, 5))
        self.assertAllClose(expected_entropy, entropy.eval())
      except ImportError as e:
        tf.logging.warn("Cannot test stats functions: %s" % str(e))

  def testSample(self):
    """Empirical mean/covariance of 100k samples approach mu/sigma."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(mu_v)
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(sigma_v)
      n = tf.constant(100000)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      # Fixed seed keeps the sampled statistics deterministic.
      samples = mvn.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(samples.get_shape(), (100000, 2))
      self.assertAllClose(sample_values.mean(axis=0), mu_v, atol=1e-2)
      self.assertAllClose(np.cov(sample_values, rowvar=0), sigma_v, atol=1e-1)

  def testSampleMultiDimensional(self):
    """Sampling from a (3, 5) batch: each element's statistics match."""
    with tf.Session():
      mu_v = np.array([-3.0, 3.0], dtype=np.float32)
      mu = tf.constant(np.vstack(15 * [mu_v]).reshape(3, 5, 2))
      sigma_v = np.array([[1.0, 0.5], [0.5, 1.0]], dtype=np.float32)
      sigma = tf.constant(
          np.vstack(15 * [sigma_v[np.newaxis, :]]).reshape(3, 5, 2, 2))
      n = tf.constant(100000)
      mvn = tf.contrib.distributions.MultivariateNormal(mu=mu, sigma=sigma)
      samples = mvn.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(samples.get_shape(), (100000, 3, 5, 2))
      # Flatten the batch dims and check each of the 15 elements separately.
      sample_values = sample_values.reshape(100000, 15, 2)
      for i in range(15):
        self.assertAllClose(
            sample_values[:, i, :].mean(axis=0), mu_v, atol=1e-2)
        self.assertAllClose(
            np.cov(sample_values[:, i, :], rowvar=0), sigma_v, atol=1e-1)
if __name__ == "__main__":
  # Discover and run the test methods above via the TensorFlow test runner.
  tf.test.main()
|
apache-2.0
|
addgene/giraffe
|
src/django/giraffe/blat/models.py
|
2
|
11010
|
from django.db import models
from django.db import utils
import datetime
import hashlib
import re
class BadSequenceError(Exception):
    """Raised when a sequence contains characters that are not valid DNA.

    The offending reason is kept on ``self.why`` and also passed to the
    base ``Exception`` so that ``e.args`` is populated and the exception
    pickles/prints correctly (the original skipped the base initializer).
    """
    def __init__(self, why):
        # Initialize the Exception machinery (args, default str/pickling).
        Exception.__init__(self, why)
        self.why = why
    def __str__(self):
        # Preserve the original behavior of rendering the reason via repr().
        return repr(self.why)
class Giraffe_Mappable_Model(models.Model):
    """
    This is an abstract class for other apps to use with their models;
    this class requires the giraffe.blat app being deployed with the
    app using this class.
    """
    class Meta:
        abstract = True

    # Raw DNA sequence supplied by the owning model (may be empty).
    sequence = models.TextField(null=True,blank=True)
    # Hash of the Sequence row created by detect_features() for this sequence.
    sequence_giraffe_id = models.CharField(max_length=64,null=True,blank=True)
    # When the feature detection was last run.
    sequence_giraffe_time = models.DateTimeField(null=True,blank=True)

    def sequence_giraffe_unixtime(self):
        """Useful for using the unixtime as a timestamp in URL, to
        help with caching."""
        import time
        return int(time.mktime(self.sequence_giraffe_time.timetuple()))

    @staticmethod
    def detect_features(sequence,db_name):
        """Create a Sequence row for `sequence` in database `db_name`, run
        feature and ORF detection on it, and return the Sequence hash."""
        # Local imports: frags/orfs presumably pull in heavy detection code,
        # so they are deferred until detection is actually requested.
        import frags.features
        import orfs
        db = Feature_Database.objects.get(name=db_name)
        # clean sequence, remove FASTA stuff, junks
        sequence = Sequence.clean_sequence(sequence)
        # create sequence record
        s = Sequence()
        s.sequence = sequence
        s.db = db
        s.save()
        # run blat algorithm to automatically detect features
        s.clear_features()
        frags.features.blat(db,s)
        # detect ORFs
        s.clear_orf_features()
        orfs.detect_orfs(s)
        return s.hash

    def giraffe_ready(self,db_name='default',force=False,save=True):
        """Ensure feature detection has run for the current sequence.

        Re-runs detection when `force` is set or when the stored hash no
        longer matches the (cleaned) sequence; updates the giraffe id and
        timestamp, and saves the model unless `save` is False.
        """
        if not self.sequence:
            # No sequence: clear any stale detection bookkeeping.
            self.sequence_giraffe_id = ''
            self.sequence_giraffe_time = None
            if save:
                self.save()
            return
        run = force
        if not force:
            # Cheap staleness check: compare stored id against fresh hash.
            (s,h) = Sequence.clean_and_hash(self.sequence)
            if h != self.sequence_giraffe_id:
                run = True
        if run:
            s = Giraffe_Mappable_Model.detect_features(self.sequence,db_name)
            self.sequence_giraffe_id = s
            self.sequence_giraffe_time = datetime.datetime.now()
        if save:
            self.save()
    # Tell Django templates this method has side effects.
    giraffe_ready.alters_data = True

    def save(self,run_giraffe_ready=True):
        """Save the model, by default refreshing feature detection first.

        giraffe_ready is called with save=False to avoid recursing back
        into this save() override.
        """
        if run_giraffe_ready:
            self.giraffe_ready(save=False)
        return super(Giraffe_Mappable_Model,self).save()
    save.alters_data = True
class Sequence_Feature_Base(models.Model):
    """Abstract base for a feature located on a Sequence: a [start, end]
    span plus its strand orientation."""
    class Meta:
        abstract = True

    sequence = models.ForeignKey('Sequence')
    start = models.PositiveIntegerField()
    end = models.PositiveIntegerField()
    # True when the feature runs in the forward (clockwise) direction.
    clockwise = models.BooleanField()

    def to_dict(self):
        """Serialize the positional fields; subclasses extend this dict."""
        d = {
            "start" : self.start,
            "end" : self.end,
            "clockwise" : self.clockwise,
        }
        return d
class Sequence_Feature(Sequence_Feature_Base):
    """A detected feature on a sequence, resolved through the per-database
    feature index produced by the blat detector."""
    feature_db_index = models.ForeignKey('Feature_DB_Index')

    @property
    def feature(self):
        # Convenience accessor for the underlying Feature row.
        return self.feature_db_index.feature

    # Gene variant info
    subset_start = models.PositiveIntegerField(default=0)
    subset_end = models.PositiveIntegerField(default=0)
    is_variant = models.BooleanField(default=False)
    has_gaps = models.BooleanField(default=False)

    class Meta:
        ordering = ['start','end']

    def to_dict(self):
        """Serialize for the client, adding feature identity, enzyme cut
        position, and gene-variant name decorations."""
        d = super(Sequence_Feature,self).to_dict()
        d['feature'] = self.feature.name
        d['feature_id'] = self.feature.id
        d['type_id'] = self.feature.type_id
        d['show_feature'] = self.feature_db_index.show_feature
        # Include cut position
        if d["type_id"] == Feature_Type.ENZYME:
            # cut_after is an offset from the strand's 5' end, so it is added
            # from start on the forward strand and subtracted from end on the
            # reverse strand; the result is wrapped onto the circular plasmid.
            if d["clockwise"]:
                cp = d["start"] + (self.feature.cut_after - 1)
            else:
                cp = d["end"] - (self.feature.cut_after - 1)
            slen = len(self.sequence.sequence)
            if cp < 0:
                cp += slen
            if cp > slen:
                cp %= slen
            d["cut"] = cp
        # Include gene variant name modifications
        elif d["type_id"] == Feature_Type.GENE:
            if self.is_variant:
                d["feature"] = d["feature"] + " (variant)";
            elif self.has_gaps:
                d["feature"] = d["feature"] + " (w/ gaps)";
            elif self.subset_end > 0:
                # Partial match of the gene: annotate with the matched range,
                # reported in strand order.
                if d["clockwise"]:
                    d["feature"] = "%s (%d - %d)" % (d["feature"], self.subset_start,
                                                     self.subset_end);
                else:
                    d["feature"] = "%s (%d - %d)" % (d["feature"], self.subset_end,
                                                     self.subset_start);
        return d
class Sequence(models.Model):
    """A cleaned DNA sequence, unique per (feature database, hash)."""

    @staticmethod
    def clean_sequence(sequence):
        """Strip FASTA comment lines and any non-sequence characters."""
        # Remove FASTA > and ; comments
        #
        # Remove only the first > comment, and any subsequent ; comments because
        # those comments do not necessarily mean the start of a new sequence.
        # Match the first line if it starts with > or ;, and subsequent lines
        # only if they start with ;. Then match the rest of that line _up to but
        # not including_ the line break (this is important so that multiple
        # comments in a row can be detected)
        # Note that multiple FASTA sequence-start comments will throw an error
        sequence = re.sub(r'(^\s*[>;]|\n\s*[;])[^\n]+(?=\n)','',sequence);
        # clean the sequence
        sequence = re.sub(r'[^A-Za-z*-]', '', sequence)
        return sequence

    @staticmethod
    def verify_bp(sequence):
        """Return True when the sequence contains only accepted base-pair
        characters (ATGC, N, IUPAC degenerate codes, U, whitespace, * and -)."""
        if re.match(r'^([atgcATGCnNbdhkmnrsvwyBDHKMNRSVWYuU\s*-])*$',sequence):
            return True
        return False

    @staticmethod
    def convert_to_dna(sequence):
        """
        Take a sequence we accept, e.g. with degenerates, and convert
        it to a valid DNA sequence with just atgc.
        """
        # Each degenerate code is collapsed to one arbitrary member base,
        # preserving the original letter's case.
        sequence = re.sub(r'[DHMNRVW*-]','A',sequence)
        sequence = re.sub(r'[dhmnrvw]','a',sequence)
        sequence = re.sub(r'[BYS]','C',sequence)
        sequence = re.sub(r'[bys]','c',sequence)
        sequence = re.sub(r'[K]','G',sequence)
        sequence = re.sub(r'[k]','g',sequence)
        sequence = re.sub(r'[U]','T',sequence)
        sequence = re.sub(r'[u]','t',sequence)
        return sequence

    @staticmethod
    def strip(sequence):
        """Remove all whitespace; map None to the empty string."""
        if sequence:
            sequence = re.sub(r'\s', '', sequence)
        else:
            sequence = ''
        return sequence

    @staticmethod
    def clean_and_hash(sequence):
        """
        Takes a sequence, returns a tuple, the cleaned sequence and
        the sequence hash.
        """
        sequence = Sequence.strip(sequence)
        # Hash is case-insensitive (lower-cased before hashing).
        hash = hashlib.sha1(sequence.lower()).hexdigest()
        return (sequence,hash)

    sequence = models.TextField()
    hash = models.CharField(max_length=64,db_index=True)
    modified = models.DateTimeField(auto_now=True)
    db = models.ForeignKey('Feature_Database')
    # Version of the feature database in effect when this row was last saved.
    db_version = models.CharField(max_length=64)

    class Meta:
        unique_together = (("db","hash"),)

    def save(self):
        """Validate, clean, and save; on a duplicate (db, hash) reuse the
        existing row's id instead of inserting.

        Raises BadSequenceError when the sequence contains characters that
        verify_bp() rejects.
        """
        self.db_version = self.db.db_version
        if not Sequence.verify_bp(self.sequence):
            raise BadSequenceError("Found non-DNA base pair character")
        (self.sequence,self.hash) = Sequence.clean_and_hash(self.sequence)
        try:
            super(Sequence,self).save()
        except utils.IntegrityError:
            # Row already exists for this (db, hash): refresh its version
            # stamp and adopt its primary key so self points at that row.
            Sequence.objects.filter(hash=self.hash,db=self.db).update(db_version=self.db_version)
            s = Sequence.objects.get(hash=self.hash,db=self.db)
            self.id = s.id
    save.alters_data = True

    def clear_features(self,feature_type=None):
        """Delete detected features for this sequence (optionally only of
        one feature type)."""
        a = { 'sequence' : self }
        if feature_type:
            a['feature__type'] = feature_type
        Sequence_Feature.objects.filter(**a).delete()
    clear_features.alters_data = True

    def clear_annotated_features(self,feature_type=None):
        """Delete manually/ORF-annotated features for this sequence
        (optionally only of one feature type)."""
        a = { 'sequence' : self }
        if feature_type:
            a['feature_type'] = feature_type
        Sequence_Feature_Annotated.objects.filter(**a).delete()
    clear_annotated_features.alters_data = True

    def clear_orf_features(self):
        """Delete ORF annotations and any annotations hanging off an ORF."""
        Sequence_Feature_Annotated.objects.filter(
            feature_type=Feature_Type.ORF,sequence=self
        ).delete()
        Sequence_Feature_Annotated.objects.filter(
            orf_annotated__isnull=False,sequence=self
        ).delete()
    clear_orf_features.alters_data = True
class Feature_Type(models.Model):
    """Category of a Feature (gene, enzyme, ORF, ...)."""
    type = models.CharField(max_length=64)

    # Feature type ID constants
    # NOTE(review): code elsewhere compares these against Feature.type_id, so
    # they must match the Feature_Type rows' primary keys (1..10) -- confirm
    # against the deployed fixture data.
    (FEATURE, PROMOTER, PRIMER,
     ENZYME, GENE, ORIGIN,
     REGULATORY, TERMINATOR, EXACT_FEATURE,
     ORF) = range(1, 11)

    def __unicode__(self):
        return self.type

    class Meta:
        verbose_name = "Feature Type"
class Feature(models.Model):
    """A named, typed reference sequence that can be detected on plasmids."""
    type = models.ForeignKey(Feature_Type)
    name = models.CharField(max_length=32,db_index=True)
    sequence = models.TextField()
    # SHA-1 of the lower-cased sequence; maintained by save().
    hash = models.CharField(max_length=64)
    # For enzymes: offset of the cut site within the recognition sequence.
    cut_after = models.PositiveIntegerField(null=True,blank=True)
    last_modified = models.DateTimeField(auto_now=True,db_index=True)

    def save(self):
        # Normalize the sequence and keep the hash in sync before saving.
        self.sequence = Sequence.strip(self.sequence)
        self.hash = hashlib.sha1(self.sequence.lower()).hexdigest()
        return super(Feature,self).save()

    def __unicode__(self):
        return self.name

    class Meta:
        unique_together = (("name","hash"),)
        ordering = ('type','name')
class Feature_Database(models.Model):
    """A named collection of Features; db_version stamps each rebuild so
    Sequence rows record which build they were detected against."""
    name = models.CharField(max_length=64,unique=True)
    features = models.ManyToManyField(Feature, through='Feature_In_Database')
    db_version = models.CharField(max_length=64)
    last_built = models.DateTimeField(null=True,blank=True)
class Feature_In_Database(models.Model):
    """Through-table for Feature_Database.features.

    show_feature presumably lets a database suppress a feature from display
    without removing the association -- confirm against UI usage.
    """
    feature = models.ForeignKey(Feature)
    feature_database = models.ForeignKey(Feature_Database)
    show_feature = models.BooleanField(default=True)
class Feature_DB_Index(models.Model):
    """Maps the detector's numeric feature index back to a Feature, per
    database; antisense presumably marks the reverse-strand entry for the
    same feature -- confirm against the index builder."""
    db = models.ForeignKey(Feature_Database)
    feature_index = models.PositiveIntegerField(db_index=True)
    feature = models.ForeignKey(Feature)
    antisense = models.BooleanField()
    show_feature = models.BooleanField(default=True)

    class Meta:
        unique_together = (("db","feature_index"),)
class Sequence_Feature_Annotated(Sequence_Feature_Base):
    """A feature stored by name/type rather than via the feature index
    (used for ORFs and other annotations)."""
    feature_name = models.CharField(max_length=64)
    feature_type = models.ForeignKey(Feature_Type)
    # Reading frame for ORF annotations; null otherwise.
    orf_frame = models.PositiveIntegerField(null=True)
    # Optional link to the ORF annotation this one belongs to.
    orf_annotated = models.ForeignKey('Sequence_Feature_Annotated',null=True)

    def to_dict(self):
        """Serialize, adding the stored name/type and ORF frame if set."""
        d = super(Sequence_Feature_Annotated,self).to_dict()
        d['feature'] = self.feature_name
        d['type_id'] = self.feature_type_id
        if self.orf_frame:
            d['orf_frame'] = self.orf_frame
        return d
|
mit
|
Sunsoo/incodom
|
lib/bzrlib/merge3.py
|
5
|
16871
|
# Copyright (C) 2004, 2005 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
# from bzrlib.errors import CantReprocessAndShowBase
import bzrlib.patiencediff
# from bzrlib.textfile import check_text_lines
def intersect(ra, rb):
    """Intersect two half-open ranges; None when they do not overlap.

    Each range is a (start, stop) pair with start <= stop.  Touching
    ranges (stop of one equals start of the other) do not intersect.

    >>> intersect((0, 10), (0, 6))
    (0, 6)
    >>> intersect((0, 10), (5, 15))
    (5, 10)
    >>> intersect((0, 10), (10, 15))
    >>> intersect((0, 9), (10, 15))
    >>> intersect((0, 9), (7, 15))
    (7, 9)
    """
    lo = max(ra[0], rb[0])
    hi = min(ra[1], rb[1])
    return (lo, hi) if lo < hi else None
def compare_range(a, astart, aend, b, bstart, bend):
    """Compare a[astart:aend] == b[bstart:bend], without slicing.

    Returns False immediately when the two ranges differ in length,
    otherwise compares element by element.
    """
    if (aend - astart) != (bend - bstart):
        return False
    # range() instead of the Python-2-only xrange(); identical results and
    # compatible with both interpreters.  Offsets index both ranges in step.
    for offset in range(aend - astart):
        if a[astart + offset] != b[bstart + offset]:
            return False
    return True
class Merge3(object):
    """3-way merge of texts.

    Given BASE, OTHER, THIS, tries to produce a combined text
    incorporating the changes from both BASE->OTHER and BASE->THIS.
    All three will typically be sequences of lines."""

    def __init__(self, base, a, b, is_cherrypick=False):
        """Store the three texts to merge.

        :param base: common-ancestor text (sequence of lines)
        :param a: one derived text ("this")
        :param b: the other derived text ("other")
        :param is_cherrypick: when True, regions where b matches base are
            not reported as conflicts (cherrypicking b's changes onto a).
        """
        # check_text_lines(base)
        # check_text_lines(a)
        # check_text_lines(b)
        self.base = base
        self.a = a
        self.b = b
        self.is_cherrypick = is_cherrypick

    def merge_lines(self,
                    name_a=None,
                    name_b=None,
                    name_base=None,
                    start_marker='<<<<<<<',
                    mid_marker='=======',
                    end_marker='>>>>>>>',
                    base_marker=None,
                    reprocess=False):
        """Return merge in cvs-like form.

        Yields output lines, with conflicts bracketed by the given markers.
        When name_a/name_b/name_base are given they are appended to the
        corresponding markers.  base_marker (if set) causes the base text
        to be shown inside conflicts; it cannot be combined with reprocess.
        """
        # Detect the newline convention from the first line of `a` so
        # conflict-marker lines match the rest of the file.
        newline = '\n'
        if len(self.a) > 0:
            if self.a[0].endswith('\r\n'):
                newline = '\r\n'
            elif self.a[0].endswith('\r'):
                newline = '\r'
        if base_marker and reprocess:
            # BUG FIX: the original `raise "CantReprocessAndShowBase"` raised
            # a bare string, which is itself a TypeError on any modern Python;
            # raise a real exception carrying the same message instead.
            raise ValueError("CantReprocessAndShowBase")
        if name_a:
            start_marker = start_marker + ' ' + name_a
        if name_b:
            end_marker = end_marker + ' ' + name_b
        if name_base and base_marker:
            base_marker = base_marker + ' ' + name_base
        merge_regions = self.merge_regions()
        if reprocess is True:
            merge_regions = self.reprocess_merge_regions(merge_regions)
        for t in merge_regions:
            what = t[0]
            if what == 'unchanged':
                for i in range(t[1], t[2]):
                    yield self.base[i]
            elif what == 'a' or what == 'same':
                for i in range(t[1], t[2]):
                    yield self.a[i]
            elif what == 'b':
                for i in range(t[1], t[2]):
                    yield self.b[i]
            elif what == 'conflict':
                yield start_marker + newline
                for i in range(t[3], t[4]):
                    yield self.a[i]
                if base_marker is not None:
                    yield base_marker + newline
                    for i in range(t[1], t[2]):
                        yield self.base[i]
                yield mid_marker + newline
                for i in range(t[5], t[6]):
                    yield self.b[i]
                yield end_marker + newline
            else:
                raise ValueError(what)

    def merge_annotated(self):
        """Return merge with conflicts, showing origin of lines.

        Most useful for debugging merge.
        """
        for t in self.merge_regions():
            what = t[0]
            if what == 'unchanged':
                for i in range(t[1], t[2]):
                    yield 'u | ' + self.base[i]
            elif what == 'a' or what == 'same':
                for i in range(t[1], t[2]):
                    yield what[0] + ' | ' + self.a[i]
            elif what == 'b':
                for i in range(t[1], t[2]):
                    yield 'b | ' + self.b[i]
            elif what == 'conflict':
                yield '<<<<\n'
                for i in range(t[3], t[4]):
                    yield 'A | ' + self.a[i]
                yield '----\n'
                for i in range(t[5], t[6]):
                    yield 'B | ' + self.b[i]
                yield '>>>>\n'
            else:
                raise ValueError(what)

    def merge_groups(self):
        """Yield sequence of line groups.  Each one is a tuple:

        'unchanged', lines
             Lines unchanged from base
        'a', lines
             Lines taken from a
        'same', lines
             Lines taken from a (and equal to b)
        'b', lines
             Lines taken from b
        'conflict', base_lines, a_lines, b_lines
             Lines from base were changed to either a or b and conflict.
        """
        for t in self.merge_regions():
            what = t[0]
            if what == 'unchanged':
                yield what, self.base[t[1]:t[2]]
            elif what == 'a' or what == 'same':
                yield what, self.a[t[1]:t[2]]
            elif what == 'b':
                yield what, self.b[t[1]:t[2]]
            elif what == 'conflict':
                yield (what,
                       self.base[t[1]:t[2]],
                       self.a[t[3]:t[4]],
                       self.b[t[5]:t[6]])
            else:
                raise ValueError(what)

    def merge_regions(self):
        """Return sequences of matching and conflicting regions.

        This returns tuples, where the first value says what kind we
        have:

        'unchanged', start, end
             Take a region of base[start:end]

        'same', astart, aend
             b and a are different from base but give the same result

        'a', start, end
             Non-clashing insertion from a[start:end]

        Method is as follows:

        The two sequences align only on regions which match the base
        and both descendents.  These are found by doing a two-way diff
        of each one against the base, and then finding the
        intersections between those regions.  These "sync regions"
        are by definition unchanged in both and easily dealt with.

        The regions in between can be in any of three cases:
        conflicted, or changed on only one side.
        """
        # section a[0:ia] has been disposed of, etc
        iz = ia = ib = 0

        for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
            matchlen = zend - zmatch
            # invariants:
            #   matchlen >= 0
            #   matchlen == (aend - amatch)
            #   matchlen == (bend - bmatch)
            len_a = amatch - ia
            len_b = bmatch - ib
            len_base = zmatch - iz
            # invariants:
            # assert len_a >= 0
            # assert len_b >= 0
            # assert len_base >= 0

            #print 'unmatched a=%d, b=%d' % (len_a, len_b)

            if len_a or len_b:
                # Something changed on at least one side since base; decide
                # which side(s) by comparing the unmatched stretches.
                # try to avoid actually slicing the lists
                same = compare_range(self.a, ia, amatch,
                                     self.b, ib, bmatch)

                if same:
                    yield 'same', ia, amatch
                else:
                    equal_a = compare_range(self.a, ia, amatch,
                                            self.base, iz, zmatch)
                    equal_b = compare_range(self.b, ib, bmatch,
                                            self.base, iz, zmatch)
                    if equal_a and not equal_b:
                        yield 'b', ib, bmatch
                    elif equal_b and not equal_a:
                        yield 'a', ia, amatch
                    elif not equal_a and not equal_b:
                        if self.is_cherrypick:
                            for node in self._refine_cherrypick_conflict(
                                    iz, zmatch, ia, amatch,
                                    ib, bmatch):
                                yield node
                        else:
                            yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
                    else:
                        raise AssertionError("can't handle a=b=base but unmatched")

            ia = amatch
            ib = bmatch
            iz = zmatch

            # if the same part of the base was deleted on both sides
            # that's OK, we can just skip it.

            if matchlen > 0:
                # invariants:
                # assert ia == amatch
                # assert ib == bmatch
                # assert iz == zmatch

                yield 'unchanged', zmatch, zend
                iz = zend
                ia = aend
                ib = bend

    def _refine_cherrypick_conflict(self, zstart, zend, astart, aend, bstart, bend):
        """When cherrypicking b => a, ignore matches with b and base."""
        # Do not emit regions which match, only regions which do not match
        matches = bzrlib.patiencediff.PatienceSequenceMatcher(None,
            self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
        # (the original assigned last_b_idx = 0 twice; the duplicate is removed)
        last_base_idx = 0
        last_b_idx = 0
        yielded_a = False
        for base_idx, b_idx, match_len in matches:
            conflict_z_len = base_idx - last_base_idx
            conflict_b_len = b_idx - last_b_idx
            if conflict_b_len == 0:  # There are no lines in b which conflict,
                                     # so skip it
                pass
            else:
                if yielded_a:
                    yield ('conflict',
                           zstart + last_base_idx, zstart + base_idx,
                           aend, aend, bstart + last_b_idx, bstart + b_idx)
                else:
                    # The first conflict gets the a-range
                    yielded_a = True
                    yield ('conflict', zstart + last_base_idx, zstart +
                           base_idx,
                           astart, aend, bstart + last_b_idx, bstart + b_idx)
            last_base_idx = base_idx + match_len
            last_b_idx = b_idx + match_len
        # NOTE(review): get_matching_blocks() always ends with a zero-length
        # sentinel at (len, len), so after the loop last_base_idx == zend-zstart
        # and last_b_idx == bend-bstart -- this tail appears unreachable, and it
        # reuses base_idx/b_idx from the final loop iteration; confirm before
        # relying on it.
        if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
            if yielded_a:
                yield ('conflict', zstart + last_base_idx, zstart + base_idx,
                       aend, aend, bstart + last_b_idx, bstart + b_idx)
            else:
                # The first conflict gets the a-range
                yielded_a = True
                yield ('conflict', zstart + last_base_idx, zstart + base_idx,
                       astart, aend, bstart + last_b_idx, bstart + b_idx)
        if not yielded_a:
            yield ('conflict', zstart, zend, astart, aend, bstart, bend)

    def reprocess_merge_regions(self, merge_regions):
        """Where there are conflict regions, remove the agreed lines.

        Lines where both A and B have made the same changes are
        eliminated.
        """
        for region in merge_regions:
            if region[0] != "conflict":
                yield region
                continue
            # renamed from `type` to avoid shadowing the builtin
            _type, iz, zmatch, ia, amatch, ib, bmatch = region
            a_region = self.a[ia:amatch]
            b_region = self.b[ib:bmatch]
            matches = bzrlib.patiencediff.PatienceSequenceMatcher(
                    None, a_region, b_region).get_matching_blocks()
            next_a = ia
            next_b = ib
            for region_ia, region_ib, region_len in matches[:-1]:
                region_ia += ia
                region_ib += ib
                reg = self.mismatch_region(next_a, region_ia, next_b,
                                           region_ib)
                if reg is not None:
                    yield reg
                yield 'same', region_ia, region_len+region_ia
                next_a = region_ia + region_len
                next_b = region_ib + region_len
            reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
            if reg is not None:
                yield reg

    @staticmethod
    def mismatch_region(next_a, region_ia, next_b, region_ib):
        """Return a conflict tuple for the unmatched gap, or None if empty."""
        if next_a < region_ia or next_b < region_ib:
            return 'conflict', None, None, next_a, region_ia, next_b, region_ib

    def find_sync_regions(self):
        """Return a list of sync regions, where both descendents match the base.

        Generates a list of (base1, base2, a1, a2, b1, b2).  There is
        always a zero-length sync region at the end of all the files.
        """
        ia = ib = 0
        amatches = bzrlib.patiencediff.PatienceSequenceMatcher(
                None, self.base, self.a).get_matching_blocks()
        bmatches = bzrlib.patiencediff.PatienceSequenceMatcher(
                None, self.base, self.b).get_matching_blocks()
        len_a = len(amatches)
        len_b = len(bmatches)

        sl = []

        while ia < len_a and ib < len_b:
            abase, amatch, alen = amatches[ia]
            bbase, bmatch, blen = bmatches[ib]

            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            i = intersect((abase, abase+alen), (bbase, bbase+blen))
            if i:
                intbase = i[0]
                intend = i[1]
                intlen = intend - intbase

                # found a match of base[i[0], i[1]]; this may be less than
                # the region that matches in either one
                # assert intlen <= alen
                # assert intlen <= blen
                # assert abase <= intbase
                # assert bbase <= intbase

                asub = amatch + (intbase - abase)
                bsub = bmatch + (intbase - bbase)
                aend = asub + intlen
                bend = bsub + intlen

                # assert self.base[intbase:intend] == self.a[asub:aend], \
                #        (self.base[intbase:intend], self.a[asub:aend])
                # assert self.base[intbase:intend] == self.b[bsub:bend]

                sl.append((intbase, intend,
                           asub, aend,
                           bsub, bend))

            # advance whichever one ends first in the base text
            if (abase + alen) < (bbase + blen):
                ia += 1
            else:
                ib += 1

        # Terminal zero-length sync region at the end of all three texts.
        intbase = len(self.base)
        abase = len(self.a)
        bbase = len(self.b)
        sl.append((intbase, intbase, abase, abase, bbase, bbase))

        return sl

    def find_unconflicted(self):
        """Return a list of ranges in base that are not conflicted."""
        am = bzrlib.patiencediff.PatienceSequenceMatcher(
                None, self.base, self.a).get_matching_blocks()
        bm = bzrlib.patiencediff.PatienceSequenceMatcher(
                None, self.base, self.b).get_matching_blocks()

        unc = []

        while am and bm:
            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            a1 = am[0][0]
            a2 = a1 + am[0][2]
            b1 = bm[0][0]
            b2 = b1 + bm[0][2]
            i = intersect((a1, a2), (b1, b2))
            if i:
                unc.append(i)

            if a2 < b2:
                del am[0]
            else:
                del bm[0]

        return unc
def main(argv):
    """Command-line entry point: 3-way merge of three files.

    Usage mirrors diff3/meld: argv is [prog, MINE, BASE, OTHER].  The
    annotated merge result is written to stdout.
    """
    # Imported here so main() works even when the enclosing module's
    # __main__ guard (which did the original `import sys`) never ran.
    import sys
    # as for diff3 and meld the syntax is "MINE BASE OTHER"
    # The py2-only file() builtin is replaced with open(); context managers
    # close the handles deterministically instead of leaking them.
    with open(argv[1], 'rt') as f:
        a = f.readlines()
    with open(argv[2], 'rt') as f:
        base = f.readlines()
    with open(argv[3], 'rt') as f:
        b = f.readlines()

    m3 = Merge3(base, a, b)

    # Alternative output mode (conflict markers instead of annotations):
    # sys.stdout.writelines(m3.merge_lines(name_a=argv[1], name_b=argv[3]))
    sys.stdout.writelines(m3.merge_annotated())
if __name__ == '__main__':
    # Allow running this module directly as a command-line 3-way merge tool;
    # main() returns None, so the process exits with status 0 on success.
    import sys
    sys.exit(main(sys.argv))
|
gpl-3.0
|
wannabeCitizen/quantifiedSelf
|
app/services_auth/spotify_auth.py
|
2
|
1324
|
from tornado import gen
from . import spotifyMix as spot
from lib.database.auth import save_token
from lib.basehandler import OAuthRequestHandler
class SpotifyAuth(OAuthRequestHandler, spot.SpotifyOAuth2Mixin):
    """Tornado handler implementing the Spotify OAuth2 authorization flow."""

    # Read-only OAuth scopes requested from Spotify.
    scope = [
        'playlist-read-private',
        'playlist-read-collaborative',
        'user-follow-read',
        'user-library-read',
        'user-read-birthdate',
        'user-read-email',
    ]

    def initialize(self):
        # Register this handler's provider name with the OAuth base class.
        super(SpotifyAuth, self).setProvider("spotify")

    def startFlow(self):
        """Redirect the user to Spotify's authorization page.

        The redirect URI points back at this same handler
        (<base_url>/auth/spotify), which then receives the auth code.
        """
        uri = '{0}/auth/spotify'.format(self.application.settings['base_url'])
        self.authorize_redirect(
            redirect_uri=uri,
            client_id=self.application.settings['spotify_oauth']['key'],
            response_type='code',
            scope=self.scope,
        )

    @gen.coroutine
    def handleAuthCallBack(self, code, user_id):
        """Exchange the authorization `code` for tokens and persist them
        for `user_id`.  The redirect_uri must match the one used in
        startFlow for the token exchange to succeed."""
        redir_uri = '{0}/auth/spotify'.format(
            self.application.settings['base_url'])
        access = yield self.get_authenticated_user(
            redirect_uri=redir_uri,
            code=code)
        # from here use spotipy - pass it over to a scraper context
        yield save_token(
            provider="spotify",
            user_id=user_id,
            token_data=access
        )
|
mit
|
grilo/ansible-1
|
lib/ansible/plugins/action/set_stats.py
|
61
|
2792
|
# Copyright 2016 Ansible (RedHat, Inc)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.utils.vars import isidentifier
class ActionModule(ActionBase):
    """Action plugin for set_stats: records user-supplied statistics that
    Ansible exposes via result['ansible_stats']."""

    # Runs entirely on the controller; nothing is copied to the target host.
    TRANSFERS_FILES = False

    # TODO: document this in non-empty set_stats.py module

    def run(self, tmp=None, task_vars=None):
        """Validate and template the task's 'data', 'per_host' and
        'aggregate' options, returning them under 'ansible_stats'.

        Sets result['failed'] with a message when 'data' is not a dict or
        a data key is not a valid variable identifier.
        """
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)

        # Defaults: aggregate stats across the run, not per host.
        stats = {'data': {}, 'per_host': False, 'aggregate': True}

        if self._task.args:
            data = self._task.args.get('data', {})

            if not isinstance(data, dict):
                # 'data' may be a template expression; render it before
                # re-checking the type.
                data = self._templar.template(data, convert_bare=False, fail_on_undefined=True)

            if not isinstance(data, dict):
                result['failed'] = True
                result['msg'] = "The 'data' option needs to be a dictionary/hash"
                return result

            # set boolean options, defaults are set above in stats init
            for opt in ['per_host', 'aggregate']:
                val = self._task.args.get(opt, None)
                if val is not None:
                    if not isinstance(val, bool):
                        # Template first, then coerce strings like "yes"/"no".
                        stats[opt] = boolean(self._templar.template(val), strict=False)
                    else:
                        stats[opt] = val

            for (k, v) in iteritems(data):
                # Keys may themselves be templates; render before validating.
                k = self._templar.template(k)

                if not isidentifier(k):
                    result['failed'] = True
                    result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
                                     "letters, numbers and underscores." % k)
                    return result

                stats['data'][k] = self._templar.template(v)

        result['changed'] = False
        result['ansible_stats'] = stats

        return result
|
gpl-3.0
|
tkasp/osmose-backend
|
analysers/analyser_osmosis_highway_motorway.py
|
4
|
3109
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2016 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Osmosis import Analyser_Osmosis
# Finds junction nodes where a motorway way shares a node with a road of a
# class that should not connect directly to a motorway (anything below
# primary/trunk that is not an escape/emergency facility, not private, and
# not under construction).  {0}/{1} are table-name prefixes used to switch
# between full and diff analysis.
sql10 = """
SELECT DISTINCT ON (highways.id)
highways.id,
ST_AsText(nodes.geom)
FROM
{0}highways AS motorways
JOIN {1}highways AS highways ON
highways.linestring && motorways.linestring AND
highways.nodes && motorways.nodes AND
highways.highway NOT IN ('motorway', 'motorway_link', 'trunk', 'trunk_link', 'primary', 'primary_link', 'escape') AND
NOT(highways.tags?'access' AND highways.tags->'access' IN ('no', 'private', 'emergency')) AND
NOT(highways.highway = 'service' AND highways.tags->'service' = 'emergency_access') AND
NOT highways.is_construction
JOIN nodes ON
nodes.id = (SELECT * FROM (SELECT unnest(highways.nodes) INTERSECT SELECT unnest(motorways.nodes)) AS t LIMIT 1)
WHERE
motorways.highway = 'motorway'
"""


class Analyser_Osmosis_Highway_Motorway(Analyser_Osmosis):
    """Osmosis analyser reporting direct/too-permissive access to motorways."""

    requires_tables_full = ['highways']
    requires_tables_diff = ['highways', 'touched_highways', 'not_touched_highways']

    def __init__(self, config, logger = None):
        Analyser_Osmosis.__init__(self, config, logger)
        self.classs_change[1] = self.def_class(item = 3220, level = 1, tags = ['tag', 'highway', 'fix:chair'],
            title = T_('Direct or too permissive access to motorway'))

        # Map each SQL row to an issue anchored on the offending way/node.
        self.callback10 = lambda res: {"class":1, "data":[self.way_full, self.positionAsText]}

    def analyser_osmosis_full(self):
        self.run(sql10.format("", ""), self.callback10)

    def analyser_osmosis_diff(self):
        # Run twice so both (touched x all) and (all x touched) pairs are seen.
        self.run(sql10.format("touched_", "not_touched_"), self.callback10)
        self.run(sql10.format("", "touched_"), self.callback10)
|
gpl-3.0
|
NeCTAR-RC/neutron
|
neutron/plugins/vmware/dbexts/nsx_models.py
|
26
|
10891
|
# Copyright 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NSX data models.
This module defines data models used by the VMware NSX plugin family.
"""
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
class TzNetworkBinding(model_base.BASEV2):
    """Represents a binding of a virtual network with a transport zone.

    This model class associates a Neutron network with a transport zone;
    optionally a vlan ID might be used if the binding type is 'bridge'
    """

    __tablename__ = 'tz_network_bindings'

    # TODO(arosen) - it might be worth while refactoring the how this data
    # is stored later so every column does not need to be a primary key.
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    # 'flat', 'vlan', stt' or 'gre'
    binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                     name='tz_network_bindings_binding_type'),
                             nullable=False, primary_key=True)
    # UUID of the physical network / transport zone ('' when not applicable).
    phy_uuid = sa.Column(sa.String(36), primary_key=True, default='')
    # VLAN tag; 0 when the binding type does not use one.
    vlan_id = sa.Column(sa.Integer, primary_key=True,
                        autoincrement=False, default=0)

    def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
        self.network_id = network_id
        self.binding_type = binding_type
        self.phy_uuid = phy_uuid
        self.vlan_id = vlan_id

    def __repr__(self):
        return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
                                                  self.binding_type,
                                                  self.phy_uuid,
                                                  self.vlan_id)
class NeutronNsxNetworkMapping(model_base.BASEV2):
    """Maps neutron network identifiers to NSX identifiers.

    Because of chained logical switches more than one mapping might exist
    for a single Neutron network.
    """
    __tablename__ = 'neutron_nsx_network_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'),
                           primary_key=True)
    # Part of the primary key: several NSX switches may back one network.
    nsx_id = sa.Column(sa.String(36), primary_key=True)


class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
    """Backend mappings for Neutron Security Group identifiers.

    This class maps a neutron security group identifier to the corresponding
    NSX security profile identifier.
    """
    __tablename__ = 'neutron_nsx_security_group_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('securitygroups.id',
                                         ondelete="CASCADE"),
                           primary_key=True)
    nsx_id = sa.Column(sa.String(36), primary_key=True)


class NeutronNsxPortMapping(model_base.BASEV2):
    """Represents the mapping between neutron and nsx port uuids."""

    __tablename__ = 'neutron_nsx_port_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('ports.id', ondelete="CASCADE"),
                           primary_key=True)
    # NSX logical switch hosting the port (nullable).
    nsx_switch_id = sa.Column(sa.String(36))
    nsx_port_id = sa.Column(sa.String(36), nullable=False)

    def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
        self.neutron_id = neutron_id
        self.nsx_switch_id = nsx_switch_id
        self.nsx_port_id = nsx_port_id


class NeutronNsxRouterMapping(model_base.BASEV2):
    """Maps neutron router identifiers to NSX identifiers."""
    __tablename__ = 'neutron_nsx_router_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('routers.id', ondelete='CASCADE'),
                           primary_key=True)
    nsx_id = sa.Column(sa.String(36))


class MultiProviderNetworks(model_base.BASEV2):
    """Networks provisioned through multiprovider extension."""

    __tablename__ = 'multi_provider_networks'
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)

    def __init__(self, network_id):
        self.network_id = network_id
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
    """Defines a connection between a network gateway and a network."""
    # We use port_id as the primary key as one can connect a gateway
    # to a network in multiple ways (and we cannot use the same port form
    # more than a single gateway)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'))
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'))
    segmentation_type = sa.Column(
        sa.Enum('flat', 'vlan',
                name='networkconnections_segmentation_type'))
    segmentation_id = sa.Column(sa.Integer)
    # A gateway may carry each (type, id) segment at most once.
    __table_args__ = (sa.UniqueConstraint(network_gateway_id,
                                          segmentation_type,
                                          segmentation_id),
                      model_base.BASEV2.__table_args__)
    # Also, storing port id comes back useful when disconnecting a network
    # from a gateway
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
                        primary_key=True)


class NetworkGatewayDeviceReference(model_base.BASEV2):
    """Associates a gateway with one interface of a gateway device."""
    id = sa.Column(sa.String(36), primary_key=True)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'),
                                   primary_key=True)
    interface_name = sa.Column(sa.String(64), primary_key=True)


class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
                           models_v2.HasTenant):
    """A physical/virtual gateway device known to the NSX backend."""
    nsx_id = sa.Column(sa.String(36))
    # Optional name for the gateway device
    name = sa.Column(sa.String(255))
    # Transport connector type. Not using enum as range of
    # connector types might vary with backend version
    connector_type = sa.Column(sa.String(10))
    # Transport connector IP Address
    connector_ip = sa.Column(sa.String(64))
    # operational status
    status = sa.Column(sa.String(16))


class NetworkGateway(model_base.BASEV2, models_v2.HasId,
                     models_v2.HasTenant):
    """Defines the data model for a network gateway."""
    name = sa.Column(sa.String(255))
    # Tenant id is nullable for this resource
    tenant_id = sa.Column(sa.String(36))
    default = sa.Column(sa.Boolean())
    devices = orm.relationship(NetworkGatewayDeviceReference,
                               backref='networkgateways',
                               cascade='all,delete')
    network_connections = orm.relationship(NetworkConnection, lazy='joined')
class MacLearningState(model_base.BASEV2):
    """Tracks whether MAC learning is enabled on a port."""

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model using the backref attribute.
    # This will instruct SQLAlchemy to eagerly load this association.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("mac_learning_state", lazy='joined',
                            uselist=False, cascade='delete'))


class LsnPort(models_v2.model_base.BASEV2):
    """Port of a Logical Service Node, keyed by the backend port id."""

    __tablename__ = 'lsn_port'

    lsn_port_id = sa.Column(sa.String(36), primary_key=True)
    lsn_id = sa.Column(sa.String(36),
                       sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
                       nullable=False)
    # Neutron subnet this LSN port serves; one LSN port per subnet.
    sub_id = sa.Column(sa.String(36), nullable=False, unique=True)
    mac_addr = sa.Column(sa.String(32), nullable=False, unique=True)

    def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
        self.lsn_port_id = lsn_port_id
        self.lsn_id = lsn_id
        self.sub_id = subnet_id
        self.mac_addr = mac_address


class Lsn(models_v2.model_base.BASEV2):
    """Logical Service Node, associated with one Neutron network."""

    __tablename__ = 'lsn'

    lsn_id = sa.Column(sa.String(36), primary_key=True)
    net_id = sa.Column(sa.String(36), nullable=False)

    def __init__(self, net_id, lsn_id):
        self.net_id = net_id
        self.lsn_id = lsn_id
class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """A QoS queue definition (bandwidth min/max, marking and DSCP)."""

    name = sa.Column(sa.String(255))
    default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
    # Guaranteed bandwidth; max is optional (unbounded when NULL).
    min = sa.Column(sa.Integer, nullable=False)
    max = sa.Column(sa.Integer, nullable=True)
    qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
                                    name='qosqueues_qos_marking'))
    dscp = sa.Column(sa.Integer)


class PortQueueMapping(model_base.BASEV2):
    """Associates a port with a QoS queue."""

    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey("ports.id", ondelete="CASCADE"),
                        primary_key=True)
    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
                         primary_key=True)

    # Add a relationship to the Port model adding a backref which will
    # allow SQLAlchemy for eagerly load the queue binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))


class NetworkQueueMapping(model_base.BASEV2):
    """Associates a network with a QoS queue."""

    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("networks.id", ondelete="CASCADE"),
                           primary_key=True)
    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
                                                      ondelete="CASCADE"))

    # Add a relationship to the Network model adding a backref which will
    # allow SQLAlchemy for eagerly load the queue binding
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))
|
apache-2.0
|
ice9js/servo
|
tests/wpt/web-platform-tests/tools/wptserve/wptserve/handlers.py
|
44
|
13193
|
import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Map *path*'s file extension to a MIME type.

    Falls back to the generic binary type when the extension is not in
    the known ``content_types`` table.
    """
    extension = os.path.splitext(path)[1].lstrip(".")
    if extension not in content_types:
        return "application/octet-stream"
    return content_types[extension]
def filesystem_path(base_path, request, url_base="/"):
    """Translate the request URL into a filesystem path under *base_path*.

    Raises HTTPException(404) for anything that would escape the root.
    """
    if base_path is None:
        base_path = request.doc_root

    # URL-decode before joining so encoded characters resolve correctly.
    path = urllib.unquote(request.url_parts.path)

    if path.startswith(url_base):
        path = path[len(url_base):]

    # Reject parent-directory traversal outright.
    if ".." in path:
        raise HTTPException(404)

    new_path = os.path.join(base_path, path)

    # Otherwise setting path to / allows access outside the root directory
    # NOTE(review): a plain prefix check can also match sibling directories
    # sharing the prefix (e.g. /root vs /rootx) -- confirm base_path always
    # ends with a separator in callers.
    if not new_path.startswith(base_path):
        raise HTTPException(404)

    return new_path
class DirectoryHandler(object):
    """Serve an HTML listing for a directory under the document root."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        # Only canonical directory URLs (with trailing slash) are listed.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)

        path = filesystem_path(self.base_path, request, self.url_base)

        if not os.path.isdir(path):
            raise HTTPException(404, "%s is not a directory" % path)

        response.headers = [("Content-Type", "text/html")]
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}

    def list_items(self, request, path):
        """Yield one <li> entry per directory item (plus '..' when nested)."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        if base_path != "/":
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a></li>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            # Escape both the href and the visible name.
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a></li>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})


directory_handler = DirectoryHandler()
class FileHandler(object):
    """Serve files from disk, with sidecar ``.headers`` overrides, HTTP
    Range support and optional response "pipe" post-processing."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path, self.url_base)

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        if os.path.isdir(path):
            # Directories are delegated to the listing handler.
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    if e.code == 416:
                        # Unsatisfiable range: advertise the actual size.
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                    raise
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)

            pipeline = None
            if "pipe" in query:
                # An explicit pipe= query parameter wins; last value is used.
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                # "*.sub.*" files get template substitution, HTML-escaped
                # for markup file types, raw otherwise.
                ml_extensions = {".html", ".htm", ".xht", ".xhtml", ".xml", ".svg"}
                escape_type = "html" if os.path.splitext(path)[1] in ml_extensions else "none"
                pipeline = Pipeline("sub(%s)" % escape_type)

            if pipeline is not None:
                response = pipeline(request, response)

            return response

        except (OSError, IOError):
            raise HTTPException(404)

    def get_headers(self, request, path):
        """Assemble headers: defaults, then directory-wide ``__dir__``
        overrides, then per-file overrides (later entries win)."""
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv

    def load_headers(self, request, path):
        # A ".sub.headers" sidecar is template-substituted; a plain
        # ".headers" sidecar is used verbatim.
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False

        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            return []
        else:
            if use_sub:
                data = template(request, data, escape_type="none")
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]

    def get_data(self, response, path, byte_ranges):
        """Return either the handle to a file, or a string containing
        the content of a chunk of the file, if we have a range request."""
        if byte_ranges is None:
            return open(path, 'rb')
        else:
            with open(path, 'rb') as f:
                response.status = 206
                if len(byte_ranges) > 1:
                    # Multiple ranges => multipart/byteranges body.
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])

    def set_response_multipart(self, response, ranges, f):
        # Switch the response to multipart/byteranges, remembering the
        # original Content-Type so each part can carry it.
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content

    def get_range_data(self, f, byte_range):
        # Ranges are half-open [lower, upper).
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)

    def default_headers(self, path):
        return [("Content-Type", guess_content_type(path))]


file_handler = FileHandler()
class PythonScriptHandler(object):
    """Execute a Python file under the document root and call its main()."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        try:
            environ = {"__file__": path}
            # Python 2 execfile: run the script with `environ` as globals
            # and locals, then look up the main() it defined.
            execfile(path, environ, environ)
            if "main" in environ:
                handler = FunctionHandler(environ["main"])
                handler(request, response)
            else:
                raise HTTPException(500, "No main function in script %s" % path)
        except IOError:
            raise HTTPException(404)


python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapt a plain ``f(request, response)`` function into a handler.

    The wrapped function may mutate ``response`` directly, or return the
    body, a ``(headers, body)`` pair, or a ``(status, headers, body)``
    triple; any other tuple shape is a server error.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        try:
            result = self.func(request, response)
        except Exception:
            # Surface any failure in the wrapped function as a 500 whose
            # message carries the traceback.
            raise HTTPException(500, message=traceback.format_exc())

        if result is None:
            return
        if not isinstance(result, tuple):
            # Bare return value is the response body.
            response.content = result
            return
        if len(result) == 3:
            response.status, headers, content = result
        elif len(result) == 2:
            headers, content = result
        else:
            raise HTTPException(500)
        response.headers.update(headers)
        response.content = content


#The generic name here is so that this can be used as a decorator
def handler(func):
    return FunctionHandler(func)
class JsonHandler(object):
    """Wrap a handler function and serialise its return value as JSON.

    Sets Content-Type and Content-Length; if the function returns a
    tuple, only the final element (the body) is JSON-encoded.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)

    def handle_request(self, request, response):
        result = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        if isinstance(result, tuple):
            # Encode just the body; pass status/headers through untouched.
            parts = list(result)
            parts[-1] = json.dumps(parts[-1])
            length = len(parts[-1])
            value = tuple(parts)
        else:
            value = json.dumps(result)
            length = len(value)
        response.headers.set("Content-Length", length)
        return value


def json_handler(func):
    return JsonHandler(func)
class AsIsHandler(object):
    """Write a file's content straight through the raw response writer."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        try:
            with open(path) as f:
                # Raw write, bypassing normal status/header generation --
                # presumably the file already contains them; TODO confirm.
                response.writer.write_content(f.read())
            response.close_connection = True
        except IOError:
            raise HTTPException(404)


as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler

        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler

    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        else:
            auth = Authentication(request.headers)
            # user=None means any credentials are accepted.
            if self.user is not None and (self.user != auth.username or self.password != auth.password):
                response.set_error(403, "Invalid username or password")
                return response
            return self.handler(request, response)


basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """Handler that unconditionally responds with a fixed error status."""

    def __init__(self, status):
        self.status = status

    def __call__(self, request, response):
        response.set_error(self.status)
class StaticHandler(object):
    def __init__(self, path, format_args, content_type, **headers):
        """Handler that reads a file from a path and substitutes some fixed data

        :param path: Path to the template file to use
        :param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: List of headers to send with responses"""

        with open(path) as f:
            self.data = f.read() % format_args

        self.resp_headers = [("Content-Type", content_type)]
        for k, v in headers.iteritems():
            # Bug fix: this previously appended to a bare `resp_headers`
            # name (NameError whenever extra header kwargs were passed);
            # append to the instance attribute instead.
            self.resp_headers.append((k.replace("_", "-"), v))

        self.handler = handler(self.handle_request)

    def handle_request(self, request, response):
        # (headers, body) pair is interpreted by FunctionHandler.
        return self.resp_headers, self.data

    def __call__(self, request, response):
        rv = self.handler(request, response)
        return rv
|
mpl-2.0
|
Proxmark/proxmark3
|
tools/xorcheck.py
|
26
|
1624
|
#!/usr/bin/python
# xorcheck.py - find xor values for 8-bit LRC
#
# Adam Laurie <adam@algroup.co.uk>
# http://rfidiot.org/
#
# This code is copyright (c) Adam Laurie, 2009, All rights reserved.
# For non-commercial use only, the following terms apply - for all other
# uses, please contact the author:
#
# This code is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
import sys
import os
# Without at least two hex bytes on the command line, show usage and exit
# with a non-zero status (os._exit(True) == exit code 1).
if(len(sys.argv) < 3):
    print
    print '\t'+sys.argv[0] + ' - Generate final byte for XOR LRC'
    print
    print 'Usage: ' + sys.argv[0] + ' <ID Byte1> <ID Byte2> ... <LRC>'
    print
    print '\tSpecifying the bytes of a UID with a known LRC will find the last byte value'
    print '\tneeded to generate that LRC with a rolling XOR. All bytes should be specified in HEX.'
    print
    print 'Example:'
    print
    print '\txorcheck.py 04 00 80 64 ba'
    print
    print 'Should produce the output:'
    print
    print '\tTarget (BA) requires final LRC XOR byte value: 5A'
    print
    os._exit(True)

# The final argument is the known/target LRC byte.
target= int(sys.argv[len(sys.argv) - 1],16)
lrc= 0x00
# XOR together every supplied byte (including the target LRC itself);
# the result is the extra byte needed so a rolling XOR over all bytes
# produces the target value.
for i in range(len(sys.argv) - 1):
    lrc ^= int(sys.argv[i + 1],16)
print
print 'Target (%02X) requires final LRC XOR byte value: %02X' % (target,lrc)
print
|
gpl-2.0
|
cemc/python3jail
|
static/maketrace/p4_encoder.py
|
1
|
6708
|
#!/usr/bin/python3 -u
# given an arbitrary piece of Python data, encode it in such a manner
# that it can be later encoded into JSON.
# http://json.org/
#
# Format:
# * None, int, float, str, bool - unchanged (long is removed in Python 3)
# (json.dumps encodes these fine verbatim)
# * list - ['LIST', unique_id, elt1, elt2, elt3, ..., eltN]
# * tuple - ['TUPLE', unique_id, elt1, elt2, elt3, ..., eltN]
# * set - ['SET', unique_id, elt1, elt2, elt3, ..., eltN]
# * dict - ['DICT', unique_id, [key1, value1], [key2, value2], ..., [keyN, valueN]]
# * instance - ['INSTANCE', class name, unique_id, [attr1, value1], [attr2, value2], ..., [attrN, valueN]]
# * class - ['CLASS', class name, unique_id, [list of superclass names], [attr1, value1], [attr2, value2], ..., [attrN, valueN]]
# * circular reference - ['CIRCULAR_REF', unique_id]
# * other - [<type name>, unique_id, string representation of object]
#
#
# the unique_id is derived from id(), which allows us to explicitly
# capture aliasing of compound values
# Key: real ID from id()
# Value: a small integer for greater readability, set by cur_small_id
# Key: real ID from id(); value: a small integer for greater readability,
# assigned from cur_small_id as new objects are encountered.
real_to_small_IDs = {}
cur_small_id = 1

import re, types

#typeRE = re.compile("<type '(.*)'>") # not used in Python 3
classRE = re.compile(r"<class '(.*)'>")
functionRE = re.compile(r"<function (\w*) (.*)>") # new case for Python 3

# When we find a <class x> and x is in this list, don't confuse the beginner
# by listing the inner details.
native_types = ['int', 'float', 'str', 'tuple', 'list', 'set', 'dict', 'bool', 'NoneType', 'bytes', 'type', 'object']


def encode(dat, ignore_id=False):
    """Encode *dat* into a JSON-serialisable structure (see module header).

    Primitives pass through unchanged; compound values become tagged lists
    such as ``['LIST', small_id, elt1, ...]``.  Aliasing and cycles are made
    explicit through the small integer ids.  With ``ignore_id=True`` every
    newly seen object gets the placeholder id 99999 (useful for testing,
    where real ids are non-deterministic).
    """

    def append_attributes(ret, new_compound_obj_ids, attr_dict):
        """Append encoded [name, value] pairs from *attr_dict* onto *ret*."""
        # traverse the mapping to grab attributes
        # (filter out useless-seeming dunder entries):
        user_attrs = sorted([e for e in attr_dict.keys()
                             if e not in {'__doc__', '__module__', '__return__', '__locals__',
                                          '__weakref__', '__dict__', '__qualname__'}])
        for attr in user_attrs:
            pair = [encode_helper(attr, new_compound_obj_ids),
                    encode_helper(attr_dict[attr], new_compound_obj_ids)]
            ret.append(pair)

    def encode_helper(dat, compound_obj_ids):
        # primitive type: returned verbatim (json.dumps handles these)
        if dat is None or type(dat) in (int, float, str, bool):
            return dat

        # compound type
        my_id = id(dat)
        global cur_small_id
        if my_id not in real_to_small_IDs:
            if ignore_id:
                real_to_small_IDs[my_id] = 99999
            else:
                real_to_small_IDs[my_id] = cur_small_id
                cur_small_id += 1

        if my_id in compound_obj_ids:
            # Already on the current descent path: this is a cycle.
            return ['CIRCULAR_REF', real_to_small_IDs[my_id]]

        new_compound_obj_ids = compound_obj_ids.union([my_id])

        typ = type(dat)
        obj_as_string = object.__repr__(dat)
        my_small_id = real_to_small_IDs[my_id]

        if typ == list:
            ret = ['LIST', my_small_id]
            for e in dat: ret.append(encode_helper(e, new_compound_obj_ids))
        elif typ == tuple:
            ret = ['TUPLE', my_small_id]
            for e in dat: ret.append(encode_helper(e, new_compound_obj_ids))
        elif typ == set:
            ret = ['SET', my_small_id]
            for e in dat: ret.append(encode_helper(e, new_compound_obj_ids))
        elif typ == dict:
            ret = ['DICT', my_small_id]
            append_attributes(ret, new_compound_obj_ids, dat)
        elif typ == type:  # it's a class
            superclass_names = [e.__name__ for e in dat.__bases__]
            ret = ['CLASS', my_small_id, dat.__name__, superclass_names]
            # Don't expose the internals of builtin/native classes.
            if dat.__name__ not in native_types:
                if hasattr(dat, '__dict__'):
                    append_attributes(ret, new_compound_obj_ids, dat.__dict__)
        elif repr(dat).startswith("<module "):
            ret = ['INSTANCE', my_small_id, 'module']
            provides = ' '.join([i for i in dir(dat) if not i.startswith('_')])
            append_attributes(ret, new_compound_obj_ids, {'name': dat.__name__, 'dir': provides})
        elif repr(typ).startswith("<class ") and obj_as_string.find('object') >= 0:  # is it an instance?
            ret = ['INSTANCE', my_small_id, dat.__class__.__name__]
            if hasattr(dat, '__dict__'):
                append_attributes(ret, new_compound_obj_ids, dat.__dict__)
        else:
            # Fallback: [<type name>, id, repr-of-object]
            typeStr = repr(typ)
            m = classRE.match(typeStr)
            assert m, typ
            ret = [m.group(1), my_small_id, obj_as_string]

        return ret

    return encode_helper(dat, set())
if __name__ == '__main__':
    # Self-test harness: compares encode() output against expected values.
    def test(actual, expected=0):
        """ Compare the actual to the expected value, and print a suitable message. """
        import sys
        linenum = sys._getframe(1).f_lineno # get the caller's line number.
        if (expected == actual):
            msg = "Test on line %s passed." % (linenum)
        else:
            msg = "Test on line %s failed. Expected '%s', but got '%s'." % (linenum, expected, actual)
        print(msg)

    # Sample classes exercising the CLASS/INSTANCE encodings.
    class P():
        p_attr1 = 123
        def p_method(self, x):
            return 2*x

    class Q(P):
        pass

    p1 = P()
    q1 = Q()

    # `addr` tracks the small id expected for the next compound object.
    addr = 1
    test(encode("hello"),"hello")
    test(encode(123),123)
    test(encode(123.45),123.45)
    test(encode(132432134423143132432134423143),132432134423143132432134423143)
    test(encode(False),False)
    test(encode(None),None)
    test(encode((1,2)), ['TUPLE', addr, 1, 2])
    addr += 1
    test(encode([1,2]), ['LIST', addr, 1, 2])
    addr += 1
    test(encode({1:'mon'}), ['DICT', addr, [1, 'mon']])
    addr += 1
    test(encode(test), ['function', addr, 'test'])
    addr += 1
    test(encode(P), ['CLASS', 'P', addr, ['object'], ['p_attr1', 123], ['p_method', ['function', addr+1, 'p_method']]])
    addr += 2
    test(encode(Q), ['CLASS', 'Q', addr, ['P']])
    addr += 1
    test(encode(p1), ['INSTANCE', 'P', addr])
    addr += 1
    test(encode(q1), ['INSTANCE', 'Q', addr])
    addr += 1
    test(encode(min), ['builtin_function_or_method', addr, '<built-in function min>'] )
    addr += 1
    test(encode(range(1,3)), ['range', addr, 'range(1, 3)'])
    addr += 1
    test(encode({1,2}), ['SET', addr, 1, 2])
    addr += 1
    p = [1,2,3]
    p.append(p) # make a circular reference
    test(encode(p), ['LIST', addr, 1, 2, 3, ['CIRCULAR_REF', addr]])
    # Need some new tests for z = type(123)
    print(encode({"stdout": "", "func_name": "<module>", "globals": {"sum": 0, "friends": ["LIST", 1, "Joe", "Bill"], "length": 3, "f": "Joe"}, "stack_locals": [], "line": 7, "event": "step_line"}))
|
gpl-3.0
|
tuhangdi/django
|
django/contrib/syndication/views.py
|
137
|
8793
|
from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404, HttpResponse
from django.template import TemplateDoesNotExist, loader
from django.utils import feedgenerator, six
from django.utils.encoding import force_text, iri_to_uri, smart_text
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
    """Return *url* as an absolute URL, prefixing scheme and *domain* as needed.

    Already-absolute http/https/mailto URLs pass through unchanged;
    network-path references ('//host/...') only get a scheme added.
    """
    scheme = 'https' if secure else 'http'
    if url.startswith('//'):
        # Support network-path reference (see #16753) - RSS requires a protocol
        return '%s:%s' % (scheme, url)
    if url.startswith(('http://', 'https://', 'mailto:')):
        return url
    return iri_to_uri('%s://%s%s' % (scheme, domain, url))
class FeedDoesNotExist(ObjectDoesNotExist):
    """ObjectDoesNotExist subclass specific to feed lookups."""
    pass
class Feed(object):
    """Base view class for syndication feeds.

    Subclasses describe the feed through attributes or methods (e.g.
    ``title``, ``link``, ``items``, ``item_title``); __get_dynamic_attr()
    transparently resolves either form.  An instance is a callable view:
    calling it renders the feed for a request into an HttpResponse.
    """
    # Feed generator class; determines the output format (RSS 2.0 by default).
    feed_type = feedgenerator.DefaultFeed
    # Optional template names used to render each item's title/description.
    title_template = None
    description_template = None
    def __call__(self, request, *args, **kwargs):
        """Render the feed for *request* and return an HttpResponse."""
        try:
            obj = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            raise Http404('Feed object does not exist.')
        feedgen = self.get_feed(obj, request)
        response = HttpResponse(content_type=feedgen.content_type)
        if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
            # if item_pubdate or item_updateddate is defined for the feed, set
            # header so as ConditionalGetMiddleware is able to send 304 NOT MODIFIED
            response['Last-Modified'] = http_date(
                timegm(feedgen.latest_post_date().utctimetuple()))
        feedgen.write(response, 'utf-8')
        return response
    def item_title(self, item):
        # Titles should be double escaped by default (see #6533)
        return escape(force_text(item))
    def item_description(self, item):
        # Default item description: the item's string representation.
        return force_text(item)
    def item_link(self, item):
        # Default item link: the model's get_absolute_url(); a missing
        # method is a configuration error, not a missing-object error.
        try:
            return item.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured(
                'Give your %s class a get_absolute_url() method, or define an '
                'item_link() method in your Feed class.' % item.__class__.__name__
            )
    def item_enclosures(self, item):
        # An item may define a single enclosure via item_enclosure_url/
        # item_enclosure_length/item_enclosure_mime_type; return it as a
        # one-element list (or an empty list when there is none).
        enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
        if enc_url:
            enc = feedgenerator.Enclosure(
                url=smart_text(enc_url),
                length=smart_text(self.__get_dynamic_attr('item_enclosure_length', item)),
                mime_type=smart_text(self.__get_dynamic_attr('item_enclosure_mime_type', item)),
            )
            return [enc]
        return []
    def __get_dynamic_attr(self, attname, obj, default=None):
        """Resolve *attname* on self as either a plain attribute or a method.

        Callables are invoked with *obj* when they take one argument
        besides ``self``, otherwise with no arguments; a missing
        attribute yields *default*.
        """
        try:
            attr = getattr(self, attname)
        except AttributeError:
            return default
        if callable(attr):
            # Check co_argcount rather than try/excepting the function and
            # catching the TypeError, because something inside the function
            # may raise the TypeError. This technique is more accurate.
            try:
                code = six.get_function_code(attr)
            except AttributeError:
                code = six.get_function_code(attr.__call__)
            if code.co_argcount == 2: # one argument is 'self'
                return attr(obj)
            else:
                return attr()
        return attr
    def feed_extra_kwargs(self, obj):
        """
        Returns an extra keyword arguments dictionary that is used when
        initializing the feed generator.
        """
        return {}
    def item_extra_kwargs(self, item):
        """
        Returns an extra keyword arguments dictionary that is used with
        the `add_item` call of the feed generator.
        """
        return {}
    def get_object(self, request, *args, **kwargs):
        # Hook for subclasses: return the object this feed is about, or None.
        return None
    def get_context_data(self, **kwargs):
        """
        Returns a dictionary to use as extra context if either
        ``self.description_template`` or ``self.item_template`` are used.
        Default implementation preserves the old behavior
        of using {'obj': item, 'site': current_site} as the context.
        """
        return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
    def get_feed(self, obj, request):
        """
        Returns a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raises FeedDoesNotExist for invalid parameters.
        """
        current_site = get_current_site(request)
        link = self.__get_dynamic_attr('link', obj)
        link = add_domain(current_site.domain, link, request.is_secure())
        feed = self.feed_type(
            title=self.__get_dynamic_attr('title', obj),
            subtitle=self.__get_dynamic_attr('subtitle', obj),
            link=link,
            description=self.__get_dynamic_attr('description', obj),
            language=settings.LANGUAGE_CODE,
            feed_url=add_domain(
                current_site.domain,
                self.__get_dynamic_attr('feed_url', obj) or request.path,
                request.is_secure(),
            ),
            author_name=self.__get_dynamic_attr('author_name', obj),
            author_link=self.__get_dynamic_attr('author_link', obj),
            author_email=self.__get_dynamic_attr('author_email', obj),
            categories=self.__get_dynamic_attr('categories', obj),
            feed_copyright=self.__get_dynamic_attr('feed_copyright', obj),
            feed_guid=self.__get_dynamic_attr('feed_guid', obj),
            ttl=self.__get_dynamic_attr('ttl', obj),
            **self.feed_extra_kwargs(obj)
        )
        # Titles/descriptions may come from templates; a missing template
        # silently falls back to item_title()/item_description() below.
        title_tmp = None
        if self.title_template is not None:
            try:
                title_tmp = loader.get_template(self.title_template)
            except TemplateDoesNotExist:
                pass
        description_tmp = None
        if self.description_template is not None:
            try:
                description_tmp = loader.get_template(self.description_template)
            except TemplateDoesNotExist:
                pass
        for item in self.__get_dynamic_attr('items', obj):
            context = self.get_context_data(item=item, site=current_site,
                                            obj=obj, request=request)
            if title_tmp is not None:
                title = title_tmp.render(context, request)
            else:
                title = self.__get_dynamic_attr('item_title', item)
            if description_tmp is not None:
                description = description_tmp.render(context, request)
            else:
                description = self.__get_dynamic_attr('item_description', item)
            link = add_domain(
                current_site.domain,
                self.__get_dynamic_attr('item_link', item),
                request.is_secure(),
            )
            enclosures = self.__get_dynamic_attr('item_enclosures', item)
            author_name = self.__get_dynamic_attr('item_author_name', item)
            if author_name is not None:
                author_email = self.__get_dynamic_attr('item_author_email', item)
                author_link = self.__get_dynamic_attr('item_author_link', item)
            else:
                author_email = author_link = None
            # Naive datetimes are interpreted in the project's default zone.
            tz = get_default_timezone()
            pubdate = self.__get_dynamic_attr('item_pubdate', item)
            if pubdate and is_naive(pubdate):
                pubdate = make_aware(pubdate, tz)
            updateddate = self.__get_dynamic_attr('item_updateddate', item)
            if updateddate and is_naive(updateddate):
                updateddate = make_aware(updateddate, tz)
            feed.add_item(
                title=title,
                link=link,
                description=description,
                unique_id=self.__get_dynamic_attr('item_guid', item, link),
                unique_id_is_permalink=self.__get_dynamic_attr(
                    'item_guid_is_permalink', item),
                enclosures=enclosures,
                pubdate=pubdate,
                updateddate=updateddate,
                author_name=author_name,
                author_email=author_email,
                author_link=author_link,
                categories=self.__get_dynamic_attr('item_categories', item),
                item_copyright=self.__get_dynamic_attr('item_copyright', item),
                **self.item_extra_kwargs(item)
            )
        return feed
|
bsd-3-clause
|
peterm-itr/edx-platform
|
lms/djangoapps/shoppingcart/migrations/0006_auto__add_field_order_refunded_time__add_field_orderitem_refund_reques.py
|
114
|
9755
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add ``Order.refunded_time`` and ``OrderItem.refund_requested_time``.

    South schema migration for the shoppingcart app.  The ``models``
    dictionary below is an auto-generated frozen snapshot of the ORM at
    the time this migration was created; do not edit it by hand.
    """
    def forwards(self, orm):
        # Adding field 'Order.refunded_time'
        db.add_column('shoppingcart_order', 'refunded_time',
                      self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'OrderItem.refund_requested_time'
        db.add_column('shoppingcart_orderitem', 'refund_requested_time',
                      self.gf('django.db.models.fields.DateTimeField')(null=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Order.refunded_time'
        db.delete_column('shoppingcart_order', 'refunded_time')
        # Deleting field 'OrderItem.refund_requested_time'
        db.delete_column('shoppingcart_orderitem', 'refund_requested_time')
    # Auto-generated ORM freeze; consumed by South, not by application code.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'shoppingcart.certificateitem': {
            'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
            'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.order': {
            'Meta': {'object_name': 'Order'},
            'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
            'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.paidcourseregistration': {
            'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.paidcourseregistrationannotation': {
            'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
            'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['shoppingcart']
|
agpl-3.0
|
abartlet/samba-old
|
third_party/dnspython/dns/rcode.py
|
100
|
3105
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Result Codes."""
import dns.exception
# Standard DNS rcode values (RFC 1035, RFC 2136, RFC 2671).
NOERROR = 0
FORMERR = 1
SERVFAIL = 2
NXDOMAIN = 3
NOTIMP = 4
REFUSED = 5
YXDOMAIN = 6
YXRRSET = 7
NXRRSET = 8
NOTAUTH = 9
NOTZONE = 10
BADVERS = 16

# Mnemonic -> value mapping used by from_text().
_by_text = {
    'NOERROR' : NOERROR,
    'FORMERR' : FORMERR,
    'SERVFAIL' : SERVFAIL,
    'NXDOMAIN' : NXDOMAIN,
    'NOTIMP' : NOTIMP,
    'REFUSED' : REFUSED,
    'YXDOMAIN' : YXDOMAIN,
    'YXRRSET' : YXRRSET,
    'NXRRSET' : NXRRSET,
    'NOTAUTH' : NOTAUTH,
    'NOTZONE' : NOTZONE,
    'BADVERS' : BADVERS
    }

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
#
# Use items() rather than the Python-2-only iteritems() so the module
# also imports cleanly on Python 3; on Python 2.x items() is merely a
# touch less efficient for this one-shot construction.
_by_value = dict((y, x) for x, y in _by_text.items())
class UnknownRcode(dns.exception.DNSException):
    """Raised if an rcode is unknown (see from_text())."""
    pass
def from_text(text):
    """Convert text into an rcode.

    @param text: the texual rcode
    @type text: string
    @raises UnknownRcode: the rcode is unknown
    @rtype: int
    """
    if text.isdigit():
        numeric = int(text)
        # Accept any value that fits in the 12-bit extended rcode space.
        if 0 <= numeric <= 4095:
            return numeric
    # Fall back to a case-insensitive mnemonic lookup.
    rcode = _by_text.get(text.upper())
    if rcode is None:
        raise UnknownRcode
    return rcode
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    @param flags: the DNS flags
    @type flags: int
    @param ednsflags: the EDNS flags
    @type ednsflags: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: int
    """
    # The low 4 bits of the rcode live in the DNS header flags; the
    # extended high 8 bits are carried in bits 20-27 of the EDNS flags.
    low = flags & 0x000f
    high = (ednsflags >> 20) & 0xff0
    value = low | high
    if not 0 <= value <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return value
def to_flags(value):
    """Return a (flags, ednsflags) tuple which encodes the rcode.

    @param value: the rcode
    @type value: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: (int, int) tuple
    """
    if value < 0 or value > 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    v = value & 0xf
    # Python ints promote to arbitrary precision automatically, so the
    # Python-2-only long() wrapper previously used here was redundant on
    # Python 2 and a NameError on Python 3.
    ev = (value & 0xff0) << 20
    return (v, ev)
def to_text(value):
    """Convert rcode into text.

    @param value: the rcode
    @type value: int
    @rtype: string
    """
    # Unknown rcodes are rendered as their decimal value.
    name = _by_value.get(value)
    if name is not None:
        return name
    return str(value)
|
gpl-3.0
|
jsynacek/systemd
|
test/networkd-test.py
|
5
|
45254
|
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1+
#
# networkd integration test
# This uses temporary configuration in /run and temporary veth devices, and
# does not write anything on disk or change any system configuration;
# but it assumes (and checks at the beginning) that networkd is not currently
# running.
#
# This can be run on a normal installation, in QEMU, nspawn (with
# --private-network), LXD (with "--config raw.lxc=lxc.aa_profile=unconfined"),
# or LXC system containers. You need at least the "ip" tool from the iproute
# package; it is recommended to install dnsmasq too to get full test coverage.
#
# ATTENTION: This uses the *installed* networkd, not the one from the built
# source tree.
#
# © 2015 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import unittest
# Whether dnsmasq is installed; some router-side tests need it for full
# coverage (see file header).
HAVE_DNSMASQ = shutil.which('dnsmasq') is not None
# True when running inside a container (nspawn/LXC/LXD/...).
IS_CONTAINER = subprocess.call(['systemd-detect-virt', '--quiet', '--container']) == 0
# Directory the tests write their temporary .network/.netdev units into.
NETWORK_UNITDIR = '/run/systemd/network'
# Path of the installed wait-online helper, or None when not found.
NETWORKD_WAIT_ONLINE = shutil.which('systemd-networkd-wait-online',
                                    path='/usr/lib/systemd:/lib/systemd')
RESOLV_CONF = '/run/systemd/resolve/resolv.conf'
# Module-level state shared by setUpModule()/tearDownModule():
tmpmounts = []       # tmpfs mounts to undo on teardown
running_units = []   # units active before the tests; restarted afterwards
stopped_units = []   # units inactive before the tests; stopped again afterwards
def setUpModule():
    """Initialize the environment, and perform sanity checks on it."""
    # NOTE: the docstring previously appeared *after* the global statement,
    # which made it a plain no-op string rather than the function's __doc__.
    global tmpmounts
    if NETWORKD_WAIT_ONLINE is None:
        raise OSError(errno.ENOENT, 'systemd-networkd-wait-online not found')
    # Do not run any tests if the system is using networkd already and it's not virtualized
    if (subprocess.call(['systemctl', 'is-active', '--quiet', 'systemd-networkd.service']) == 0 and
            subprocess.call(['systemd-detect-virt', '--quiet']) != 0):
        raise unittest.SkipTest('not virtualized and networkd is already active')
    # Ensure we don't mess with an existing networkd config
    for u in ['systemd-networkd.socket', 'systemd-networkd', 'systemd-resolved']:
        if subprocess.call(['systemctl', 'is-active', '--quiet', u]) == 0:
            subprocess.call(['systemctl', 'stop', u])
            running_units.append(u)
        else:
            stopped_units.append(u)
    # create static systemd-network user for networkd-test-router.service (it
    # needs to do some stuff as root and can't start as user; but networkd
    # still insists on the user)
    if subprocess.call(['getent', 'passwd', 'systemd-network']) != 0:
        subprocess.call(['useradd', '--system', '--no-create-home', 'systemd-network'])
    # Shadow the real configuration directories with tmpfs so the tests
    # never touch on-disk state; remember what we mounted for teardown.
    for d in ['/etc/systemd/network', '/run/systemd/network',
              '/run/systemd/netif', '/run/systemd/resolve']:
        if os.path.isdir(d):
            subprocess.check_call(["mount", "-t", "tmpfs", "none", d])
            tmpmounts.append(d)
    if os.path.isdir('/run/systemd/resolve'):
        os.chmod('/run/systemd/resolve', 0o755)
        shutil.chown('/run/systemd/resolve', 'systemd-resolve', 'systemd-resolve')
    if os.path.isdir('/run/systemd/netif'):
        os.chmod('/run/systemd/netif', 0o755)
        shutil.chown('/run/systemd/netif', 'systemd-network', 'systemd-network')
    # Avoid "Failed to open /dev/tty" errors in containers.
    os.environ['SYSTEMD_LOG_TARGET'] = 'journal'
    # Ensure the unit directory exists so tests can dump files into it.
    os.makedirs(NETWORK_UNITDIR, exist_ok=True)
def tearDownModule():
    """Undo the host changes made by setUpModule()."""
    global tmpmounts
    # Drop the tmpfs overlays first so the real configuration reappears.
    for mountpoint in tmpmounts:
        subprocess.check_call(["umount", mountpoint])
    # Units that were inactive before are stopped again (tests may have
    # started them); units that were active before are brought back up.
    for unit in stopped_units:
        subprocess.call(["systemctl", "stop", unit])
    for unit in running_units:
        subprocess.call(["systemctl", "restart", unit])
class NetworkdTestingUtilities:
    """Provide a set of utility functions to facilitate networkd tests.

    This class must be inherited along with unittest.TestCase to define
    some required methods.
    """
    def add_veth_pair(self, veth, peer, veth_options=(), peer_options=()):
        """Add a veth interface pair, and queue them to be removed."""
        subprocess.check_call(['ip', 'link', 'add', 'name', veth] +
                              list(veth_options) +
                              ['type', 'veth', 'peer', 'name', peer] +
                              list(peer_options))
        # Deleting one end of a veth pair removes the peer as well.
        self.addCleanup(subprocess.call, ['ip', 'link', 'del', 'dev', peer])
    def write_config(self, path, contents):
        """Write a configuration file, and queue it to be removed."""
        with open(path, 'w') as f:
            f.write(contents)
        self.addCleanup(os.remove, path)
    def write_network(self, unit_name, contents):
        """Write a network unit file, and queue it to be removed."""
        self.write_config(os.path.join(NETWORK_UNITDIR, unit_name), contents)
    def write_network_dropin(self, unit_name, dropin_name, contents):
        """Write a network unit drop-in, and queue it to be removed."""
        dropin_dir = os.path.join(NETWORK_UNITDIR, "{}.d".format(unit_name))
        dropin_path = os.path.join(dropin_dir, "{}.conf".format(dropin_name))
        os.makedirs(dropin_dir, exist_ok=True)
        self.addCleanup(os.rmdir, dropin_dir)
        with open(dropin_path, 'w') as dropin:
            dropin.write(contents)
        self.addCleanup(os.remove, dropin_path)
    def read_attr(self, link, attribute):
        """Read a link attribute from the sysfs."""
        # Note we don't want to check if interface `link' is managed, we
        # want to evaluate link variable and pass the value of the link to
        # assert_link_states e.g. eth0=managed.
        self.assert_link_states(**{link:'managed'})
        with open(os.path.join('/sys/class/net', link, attribute)) as f:
            return f.readline().strip()
    def assert_link_states(self, **kwargs):
        """Match networkctl link states to the given ones.

        Each keyword argument should be the name of a network interface
        with its expected value of the "SETUP" column in output from
        networkctl. The interfaces have five seconds to come online
        before the check is performed. Every specified interface must
        be present in the output, and any other interfaces found in the
        output are ignored.

        A special interface state "managed" is supported, which matches
        any value in the "SETUP" column other than "unmanaged".
        """
        if not kwargs:
            return
        interfaces = set(kwargs)
        # Wait for the requested interfaces, but don't fail for them.
        subprocess.call([NETWORKD_WAIT_ONLINE, '--timeout=5'] +
                        ['--interface={}'.format(iface) for iface in kwargs])
        # Validate each link state found in the networkctl output.
        out = subprocess.check_output(['networkctl', '--no-legend']).rstrip()
        for line in out.decode('utf-8').split('\n'):
            fields = line.split()
            if len(fields) >= 5 and fields[1] in kwargs:
                iface = fields[1]
                expected = kwargs[iface]
                actual = fields[-1]
                if (actual != expected and
                    not (expected == 'managed' and actual != 'unmanaged')):
                    self.fail("Link {} expects state {}, found {}".format(iface, expected, actual))
                interfaces.remove(iface)
        # Ensure that all requested interfaces have been covered.
        if interfaces:
            self.fail("Missing links in status output: {}".format(interfaces))
class BridgeTest(NetworkdTestingUtilities, unittest.TestCase):
    """Test a bridge with two dummy ports set up via .netdev/.network units.

    setUp() creates ``port1``/``port2`` dummy devices enslaved to
    ``mybridge`` with a static address; the tests verify link management
    and the [Bridge] port-property keys via sysfs brport attributes.
    """
    def setUp(self):
        self.write_network('port1.netdev', '''\
[NetDev]
Name=port1
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('port2.netdev', '''\
[NetDev]
Name=port2
Kind=dummy
MACAddress=12:34:56:78:9a:bd''')
        self.write_network('mybridge.netdev', '''\
[NetDev]
Name=mybridge
Kind=bridge''')
        self.write_network('port1.network', '''\
[Match]
Name=port1
[Network]
Bridge=mybridge''')
        self.write_network('port2.network', '''\
[Match]
Name=port2
[Network]
Bridge=mybridge''')
        self.write_network('mybridge.network', '''\
[Match]
Name=mybridge
[Network]
DNS=192.168.250.1
Address=192.168.250.33/24
Gateway=192.168.250.1''')
        subprocess.call(['systemctl', 'reset-failed', 'systemd-networkd', 'systemd-resolved'])
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
    def tearDown(self):
        subprocess.check_call(['systemctl', 'stop', 'systemd-networkd'])
        # dummy/bridge devices are not removed by stopping networkd.
        subprocess.check_call(['ip', 'link', 'del', 'mybridge'])
        subprocess.check_call(['ip', 'link', 'del', 'port1'])
        subprocess.check_call(['ip', 'link', 'del', 'port2'])
    def test_bridge_init(self):
        # All three devices should end up managed by networkd.
        self.assert_link_states(
            port1='managed',
            port2='managed',
            mybridge='managed')
    def test_bridge_port_priority(self):
        # 32 is the kernel's default bridge port priority.
        self.assertEqual(self.read_attr('port1', 'brport/priority'), '32')
        self.write_network_dropin('port1.network', 'priority', '''\
[Bridge]
Priority=28
''')
        subprocess.check_call(['systemctl', 'restart', 'systemd-networkd'])
        self.assertEqual(self.read_attr('port1', 'brport/priority'), '28')
    def test_bridge_port_priority_set_zero(self):
        """It should be possible to set the bridge port priority to 0"""
        self.assertEqual(self.read_attr('port2', 'brport/priority'), '32')
        self.write_network_dropin('port2.network', 'priority', '''\
[Bridge]
Priority=0
''')
        subprocess.check_call(['systemctl', 'restart', 'systemd-networkd'])
        self.assertEqual(self.read_attr('port2', 'brport/priority'), '0')
    def test_bridge_port_property(self):
        """Test the "[Bridge]" section keys"""
        self.assertEqual(self.read_attr('port2', 'brport/priority'), '32')
        self.write_network_dropin('port2.network', 'property', '''\
[Bridge]
UnicastFlood=true
HairPin=true
UseBPDU=true
FastLeave=true
AllowPortToBeRoot=true
Cost=555
Priority=23
''')
        subprocess.check_call(['systemctl', 'restart', 'systemd-networkd'])
        # Each key should be reflected in the corresponding sysfs attribute.
        self.assertEqual(self.read_attr('port2', 'brport/priority'), '23')
        self.assertEqual(self.read_attr('port2', 'brport/hairpin_mode'), '1')
        self.assertEqual(self.read_attr('port2', 'brport/path_cost'), '555')
        self.assertEqual(self.read_attr('port2', 'brport/multicast_fast_leave'), '1')
        self.assertEqual(self.read_attr('port2', 'brport/unicast_flood'), '1')
        self.assertEqual(self.read_attr('port2', 'brport/bpdu_guard'), '1')
        self.assertEqual(self.read_attr('port2', 'brport/root_block'), '1')
class ClientTestBase(NetworkdTestingUtilities):
"""Provide common methods for testing networkd against servers."""
@classmethod
def setUpClass(klass):
klass.orig_log_level = subprocess.check_output(
['systemctl', 'show', '--value', '--property', 'LogLevel'],
universal_newlines=True).strip()
subprocess.check_call(['systemd-analyze', 'log-level', 'debug'])
@classmethod
def tearDownClass(klass):
subprocess.check_call(['systemd-analyze', 'log-level', klass.orig_log_level])
def setUp(self):
self.iface = 'test_eth42'
self.if_router = 'router_eth42'
self.workdir_obj = tempfile.TemporaryDirectory()
self.workdir = self.workdir_obj.name
self.config = 'test_eth42.network'
# get current journal cursor
subprocess.check_output(['journalctl', '--sync'])
out = subprocess.check_output(['journalctl', '-b', '--quiet',
'--no-pager', '-n0', '--show-cursor'],
universal_newlines=True)
self.assertTrue(out.startswith('-- cursor:'))
self.journal_cursor = out.split()[-1]
subprocess.call(['systemctl', 'reset-failed', 'systemd-networkd', 'systemd-resolved'])
def tearDown(self):
self.shutdown_iface()
subprocess.call(['systemctl', 'stop', 'systemd-networkd'])
subprocess.call(['ip', 'link', 'del', 'dummy0'],
stderr=subprocess.DEVNULL)
def show_journal(self, unit):
'''Show journal of given unit since start of the test'''
print('---- {} ----'.format(unit))
subprocess.check_output(['journalctl', '--sync'])
sys.stdout.flush()
subprocess.call(['journalctl', '-b', '--no-pager', '--quiet',
'--cursor', self.journal_cursor, '-u', unit])
def create_iface(self, ipv6=False):
'''Create test interface with DHCP server behind it'''
raise NotImplementedError('must be implemented by a subclass')
def shutdown_iface(self):
'''Remove test interface and stop DHCP server'''
raise NotImplementedError('must be implemented by a subclass')
def print_server_log(self):
'''Print DHCP server log for debugging failures'''
raise NotImplementedError('must be implemented by a subclass')
def start_unit(self, unit):
try:
subprocess.check_call(['systemctl', 'start', unit])
except subprocess.CalledProcessError:
self.show_journal(unit)
raise
def do_test(self, coldplug=True, ipv6=False, extra_opts='',
online_timeout=10, dhcp_mode='yes'):
self.start_unit('systemd-resolved')
self.write_network(self.config, '''\
[Match]
Name={}
[Network]
DHCP={}
{}'''.format(self.iface, dhcp_mode, extra_opts))
if coldplug:
# create interface first, then start networkd
self.create_iface(ipv6=ipv6)
self.start_unit('systemd-networkd')
elif coldplug is not None:
# start networkd first, then create interface
self.start_unit('systemd-networkd')
self.create_iface(ipv6=ipv6)
else:
# "None" means test sets up interface by itself
self.start_unit('systemd-networkd')
try:
subprocess.check_call([NETWORKD_WAIT_ONLINE, '--interface',
self.iface, '--timeout=%i' % online_timeout])
if ipv6:
# check iface state and IP 6 address; FIXME: we need to wait a bit
# longer, as the iface is "configured" already with IPv4 *or*
# IPv6, but we want to wait for both
for _ in range(10):
out = subprocess.check_output(['ip', 'a', 'show', 'dev', self.iface])
if b'state UP' in out and b'inet6 2600' in out and b'inet 192.168' in out:
break
time.sleep(1)
else:
self.fail('timed out waiting for IPv6 configuration')
self.assertRegex(out, b'inet6 2600::.* scope global .*dynamic')
self.assertRegex(out, b'inet6 fe80::.* scope link')
else:
# should have link-local address on IPv6 only
out = subprocess.check_output(['ip', '-6', 'a', 'show', 'dev', self.iface])
self.assertRegex(out, br'inet6 fe80::.* scope link')
self.assertNotIn(b'scope global', out)
# should have IPv4 address
out = subprocess.check_output(['ip', '-4', 'a', 'show', 'dev', self.iface])
self.assertIn(b'state UP', out)
self.assertRegex(out, br'inet 192.168.5.\d+/.* scope global dynamic')
# check networkctl state
out = subprocess.check_output(['networkctl'])
self.assertRegex(out, (r'{}\s+ether\s+[a-z-]+\s+unmanaged'.format(self.if_router)).encode())
self.assertRegex(out, (r'{}\s+ether\s+routable\s+configured'.format(self.iface)).encode())
out = subprocess.check_output(['networkctl', 'status', self.iface])
self.assertRegex(out, br'Type:\s+ether')
self.assertRegex(out, br'State:\s+routable.*configured')
self.assertRegex(out, br'Address:\s+192.168.5.\d+')
if ipv6:
self.assertRegex(out, br'2600::')
else:
self.assertNotIn(br'2600::', out)
self.assertRegex(out, br'fe80::')
self.assertRegex(out, br'Gateway:\s+192.168.5.1')
self.assertRegex(out, br'DNS:\s+192.168.5.1')
except (AssertionError, subprocess.CalledProcessError):
# show networkd status, journal, and DHCP server log on failure
with open(os.path.join(NETWORK_UNITDIR, self.config)) as f:
print('\n---- {} ----\n{}'.format(self.config, f.read()))
print('---- interface status ----')
sys.stdout.flush()
subprocess.call(['ip', 'a', 'show', 'dev', self.iface])
print('---- networkctl status {} ----'.format(self.iface))
sys.stdout.flush()
rc = subprocess.call(['networkctl', 'status', self.iface])
if rc != 0:
print("'networkctl status' exited with an unexpected code {}".format(rc))
self.show_journal('systemd-networkd.service')
self.print_server_log()
raise
for timeout in range(50):
with open(RESOLV_CONF) as f:
contents = f.read()
if 'nameserver 192.168.5.1\n' in contents:
break
time.sleep(0.1)
else:
self.fail('nameserver 192.168.5.1 not found in ' + RESOLV_CONF)
if coldplug is False:
# check post-down.d hook
self.shutdown_iface()
def test_coldplug_dhcp_yes_ip4(self):
# we have a 12s timeout on RA, so we need to wait longer
self.do_test(coldplug=True, ipv6=False, online_timeout=15)
def test_coldplug_dhcp_yes_ip4_no_ra(self):
# with disabling RA explicitly things should be fast
self.do_test(coldplug=True, ipv6=False,
extra_opts='IPv6AcceptRA=False')
def test_coldplug_dhcp_ip4_only(self):
# we have a 12s timeout on RA, so we need to wait longer
self.do_test(coldplug=True, ipv6=False, dhcp_mode='ipv4',
online_timeout=15)
    def test_coldplug_dhcp_ip4_only_no_ra(self):
        '''coldplug, DHCP=ipv4, RA disabled.'''
        # with disabling RA explicitly things should be fast
        self.do_test(coldplug=True, ipv6=False, dhcp_mode='ipv4',
                     extra_opts='IPv6AcceptRA=False')
    def test_coldplug_dhcp_ip6(self):
        '''coldplug, DHCP with IPv6 (router advertises 2600::/64).'''
        self.do_test(coldplug=True, ipv6=True)
    def test_hotplug_dhcp_ip4(self):
        '''hotplug (networkd started before the link exists), IPv4 only.'''
        # With IPv4 only we have a 12s timeout on RA, so we need to wait longer
        self.do_test(coldplug=False, ipv6=False, online_timeout=15)
    def test_hotplug_dhcp_ip6(self):
        '''hotplug, DHCP with IPv6.'''
        self.do_test(coldplug=False, ipv6=True)
    def test_route_only_dns(self):
        '''A ~routing-only domain restricts its DNS server to that domain.

        The VPN-like dummy link declares 192.168.42.1 for the routing domain
        "~company" only; it must not show up as a global nameserver and
        "company" must not become a search domain.
        '''
        self.write_network('myvpn.netdev', '''\
[NetDev]
Name=dummy0
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('myvpn.network', '''\
[Match]
Name=dummy0
[Network]
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= ~company''')
        try:
            # NOTE(review): "IPv6AcceptRouterAdvertisements" appears to be the
            # legacy spelling of IPv6AcceptRA= -- confirm networkd still
            # accepts it; the sibling tests use the short form.
            self.do_test(coldplug=True, ipv6=False,
                         extra_opts='IPv6AcceptRouterAdvertisements=False')
        except subprocess.CalledProcessError as e:
            # networkd often fails to start in LXC: https://github.com/systemd/systemd/issues/11848
            if IS_CONTAINER and e.cmd == ['systemctl', 'start', 'systemd-networkd']:
                raise unittest.SkipTest('https://github.com/systemd/systemd/issues/11848')
            else:
                raise
        with open(RESOLV_CONF) as f:
            contents = f.read()
        # ~company is not a search domain, only a routing domain
        self.assertNotRegex(contents, 'search.*company')
        # our global server should appear
        self.assertIn('nameserver 192.168.5.1\n', contents)
        # should not have domain-restricted server as global server
        self.assertNotIn('nameserver 192.168.42.1\n', contents)
    def test_route_only_dns_all_domains(self):
        '''Like test_route_only_dns, but with the "~." catch-all route.

        With "~." in Domains= the restricted server additionally becomes a
        global nameserver, while "company" still must not be a search domain.
        '''
        self.write_network('myvpn.netdev', '''[NetDev]
Name=dummy0
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('myvpn.network', '''[Match]
Name=dummy0
[Network]
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= ~company ~.''')
        try:
            self.do_test(coldplug=True, ipv6=False,
                         extra_opts='IPv6AcceptRouterAdvertisements=False')
        except subprocess.CalledProcessError as e:
            # networkd often fails to start in LXC: https://github.com/systemd/systemd/issues/11848
            if IS_CONTAINER and e.cmd == ['systemctl', 'start', 'systemd-networkd']:
                raise unittest.SkipTest('https://github.com/systemd/systemd/issues/11848')
            else:
                raise
        with open(RESOLV_CONF) as f:
            contents = f.read()
        # ~company is not a search domain, only a routing domain
        self.assertNotRegex(contents, 'search.*company')
        # our global server should appear
        self.assertIn('nameserver 192.168.5.1\n', contents)
        # should have company server as global server due to ~.
        self.assertIn('nameserver 192.168.42.1\n', contents)
@unittest.skipUnless(HAVE_DNSMASQ, 'dnsmasq not installed')
class DnsmasqClientTest(ClientTestBase, unittest.TestCase):
    '''Test networkd client against dnsmasq.

    Runs the generic ClientTestBase scenarios with dnsmasq acting as the
    DHCP/DNS server on the "router" end of a veth pair, plus a few
    resolved/hostnamed integration tests that need dnsmasq-specific options.
    '''

    def setUp(self):
        super().setUp()
        # dnsmasq Popen handle; started lazily by create_iface()
        self.dnsmasq = None
        # fixed MAC so --dhcp-host rules can address our client interface
        self.iface_mac = 'de:ad:be:ef:47:11'

    def create_iface(self, ipv6=False, dnsmasq_opts=None):
        '''Create test interface with DHCP server behind it'''
        # add veth pair
        subprocess.check_call(['ip', 'link', 'add', 'name', self.iface,
                               'address', self.iface_mac,
                               'type', 'veth', 'peer', 'name', self.if_router])
        # give our router an IP
        subprocess.check_call(['ip', 'a', 'flush', 'dev', self.if_router])
        subprocess.check_call(['ip', 'a', 'add', '192.168.5.1/24', 'dev', self.if_router])
        if ipv6:
            subprocess.check_call(['ip', 'a', 'add', '2600::1/64', 'dev', self.if_router])
        subprocess.check_call(['ip', 'link', 'set', self.if_router, 'up'])
        # add DHCP server
        self.dnsmasq_log = os.path.join(self.workdir, 'dnsmasq.log')
        lease_file = os.path.join(self.workdir, 'dnsmasq.leases')
        if ipv6:
            # advertise RAs and a DHCPv6 range in addition to IPv4
            extra_opts = ['--enable-ra', '--dhcp-range=2600::10,2600::20']
        else:
            extra_opts = []
        if dnsmasq_opts:
            extra_opts += dnsmasq_opts
        # --bind-interfaces + --interface restrict dnsmasq to the router side
        # so it cannot interfere with the host's own resolver setup
        self.dnsmasq = subprocess.Popen(
            ['dnsmasq', '--keep-in-foreground', '--log-queries',
             '--log-facility=' + self.dnsmasq_log, '--conf-file=/dev/null',
             '--dhcp-leasefile=' + lease_file, '--bind-interfaces',
             '--interface=' + self.if_router, '--except-interface=lo',
             '--dhcp-range=192.168.5.10,192.168.5.200'] + extra_opts)

    def shutdown_iface(self):
        '''Remove test interface and stop DHCP server'''
        if self.if_router:
            # deleting the router end also removes the peer (client) end
            subprocess.check_call(['ip', 'link', 'del', 'dev', self.if_router])
            self.if_router = None
        if self.dnsmasq:
            self.dnsmasq.kill()
            self.dnsmasq.wait()
            self.dnsmasq = None

    def print_server_log(self):
        '''Print DHCP server log for debugging failures'''
        with open(self.dnsmasq_log) as f:
            sys.stdout.write('\n\n---- dnsmasq log ----\n{}\n------\n\n'.format(f.read()))

    def test_resolved_domain_restricted_dns(self):
        '''resolved: domain-restricted DNS servers'''
        # FIXME: resolvectl query fails with enabled DNSSEC against our dnsmasq
        conf = '/run/systemd/resolved.conf.d/test-disable-dnssec.conf'
        os.makedirs(os.path.dirname(conf), exist_ok=True)
        with open(conf, 'w') as f:
            f.write('[Resolve]\nDNSSEC=no\n')
        self.addCleanup(os.remove, conf)
        # create interface for generic connections; this will map all DNS names
        # to 192.168.42.1
        self.create_iface(dnsmasq_opts=['--address=/#/192.168.42.1'])
        self.write_network('general.network', '''\
[Match]
Name={}
[Network]
DHCP=ipv4
IPv6AcceptRA=False'''.format(self.iface))
        # create second device/dnsmasq for a .company/.lab VPN interface
        # static IPs for simplicity
        self.add_veth_pair('testvpnclient', 'testvpnrouter')
        subprocess.check_call(['ip', 'a', 'flush', 'dev', 'testvpnrouter'])
        subprocess.check_call(['ip', 'a', 'add', '10.241.3.1/24', 'dev', 'testvpnrouter'])
        subprocess.check_call(['ip', 'link', 'set', 'testvpnrouter', 'up'])
        vpn_dnsmasq_log = os.path.join(self.workdir, 'dnsmasq-vpn.log')
        vpn_dnsmasq = subprocess.Popen(
            ['dnsmasq', '--keep-in-foreground', '--log-queries',
             '--log-facility=' + vpn_dnsmasq_log, '--conf-file=/dev/null',
             '--dhcp-leasefile=/dev/null', '--bind-interfaces',
             '--interface=testvpnrouter', '--except-interface=lo',
             '--address=/math.lab/10.241.3.3', '--address=/cantina.company/10.241.4.4'])
        self.addCleanup(vpn_dnsmasq.wait)
        self.addCleanup(vpn_dnsmasq.kill)
        self.write_network('vpn.network', '''\
[Match]
Name=testvpnclient
[Network]
IPv6AcceptRA=False
Address=10.241.3.2/24
DNS=10.241.3.1
Domains= ~company ~lab''')
        self.start_unit('systemd-networkd')
        subprocess.check_call([NETWORKD_WAIT_ONLINE, '--interface', self.iface,
                               '--interface=testvpnclient', '--timeout=20'])
        # ensure we start fresh with every test
        subprocess.check_call(['systemctl', 'restart', 'systemd-resolved'])
        # test vpnclient specific domains; these should *not* be answered by
        # the general DNS
        out = subprocess.check_output(['resolvectl', 'query', 'math.lab'])
        self.assertIn(b'math.lab: 10.241.3.3', out)
        out = subprocess.check_output(['resolvectl', 'query', 'kettle.cantina.company'])
        self.assertIn(b'kettle.cantina.company: 10.241.4.4', out)
        # test general domains
        out = subprocess.check_output(['resolvectl', 'query', 'megasearch.net'])
        self.assertIn(b'megasearch.net: 192.168.42.1', out)
        with open(self.dnsmasq_log) as f:
            general_log = f.read()
        with open(vpn_dnsmasq_log) as f:
            vpn_log = f.read()
        # VPN domains should only be sent to VPN DNS
        self.assertRegex(vpn_log, 'query.*math.lab')
        self.assertRegex(vpn_log, 'query.*cantina.company')
        self.assertNotIn('.lab', general_log)
        self.assertNotIn('.company', general_log)
        # general domains should not be sent to the VPN DNS
        self.assertRegex(general_log, 'query.*megasearch.net')
        self.assertNotIn('megasearch.net', vpn_log)

    def test_resolved_etc_hosts(self):
        '''resolved queries to /etc/hosts'''
        # FIXME: -t MX query fails with enabled DNSSEC (even when using
        # the known negative trust anchor .internal instead of .example.com)
        conf = '/run/systemd/resolved.conf.d/test-disable-dnssec.conf'
        os.makedirs(os.path.dirname(conf), exist_ok=True)
        with open(conf, 'w') as f:
            f.write('[Resolve]\nDNSSEC=no\nLLMNR=no\nMulticastDNS=no\n')
        self.addCleanup(os.remove, conf)
        # create /etc/hosts bind mount which resolves my.example.com for IPv4
        hosts = os.path.join(self.workdir, 'hosts')
        with open(hosts, 'w') as f:
            f.write('172.16.99.99  my.example.com\n')
        subprocess.check_call(['mount', '--bind', hosts, '/etc/hosts'])
        self.addCleanup(subprocess.call, ['umount', '/etc/hosts'])
        subprocess.check_call(['systemctl', 'stop', 'systemd-resolved.service'])
        # note: different IPv4 address here, so that it's easy to tell apart
        # what resolved the query
        self.create_iface(dnsmasq_opts=['--host-record=my.example.com,172.16.99.1,2600::99:99',
                                        '--host-record=other.example.com,172.16.0.42,2600::42',
                                        '--mx-host=example.com,mail.example.com'],
                          ipv6=True)
        self.do_test(coldplug=None, ipv6=True)
        try:
            # family specific queries
            out = subprocess.check_output(['resolvectl', 'query', '-4', 'my.example.com'])
            self.assertIn(b'my.example.com: 172.16.99.99', out)
            # we don't expect an IPv6 answer; if /etc/hosts has any IP address,
            # it's considered a sufficient source
            self.assertNotEqual(subprocess.call(['resolvectl', 'query', '-6', 'my.example.com']), 0)
            # "any family" query; IPv4 should come from /etc/hosts
            out = subprocess.check_output(['resolvectl', 'query', 'my.example.com'])
            self.assertIn(b'my.example.com: 172.16.99.99', out)
            # IP → name lookup; again, takes the /etc/hosts one
            out = subprocess.check_output(['resolvectl', 'query', '172.16.99.99'])
            self.assertIn(b'172.16.99.99: my.example.com', out)
            # non-address RRs should fall back to DNS
            out = subprocess.check_output(['resolvectl', 'query', '--type=MX', 'example.com'])
            self.assertIn(b'example.com IN MX 1 mail.example.com', out)
            # other domains query DNS
            out = subprocess.check_output(['resolvectl', 'query', 'other.example.com'])
            self.assertIn(b'172.16.0.42', out)
            out = subprocess.check_output(['resolvectl', 'query', '172.16.0.42'])
            self.assertIn(b'172.16.0.42: other.example.com', out)
        except (AssertionError, subprocess.CalledProcessError):
            self.show_journal('systemd-resolved.service')
            self.print_server_log()
            raise

    def test_transient_hostname(self):
        '''networkd sets transient hostname from DHCP'''
        orig_hostname = socket.gethostname()
        self.addCleanup(socket.sethostname, orig_hostname)
        # temporarily move /etc/hostname away; restart hostnamed to pick it up
        if os.path.exists('/etc/hostname'):
            subprocess.check_call(['mount', '--bind', '/dev/null', '/etc/hostname'])
            self.addCleanup(subprocess.call, ['umount', '/etc/hostname'])
        subprocess.check_call(['systemctl', 'stop', 'systemd-hostnamed.service'])
        self.addCleanup(subprocess.call, ['systemctl', 'stop', 'systemd-hostnamed.service'])
        # hand out a fixed lease with hostname "testgreen" to our MAC
        self.create_iface(dnsmasq_opts=['--dhcp-host={},192.168.5.210,testgreen'.format(self.iface_mac)])
        self.do_test(coldplug=None, extra_opts='IPv6AcceptRA=False', dhcp_mode='ipv4')
        try:
            # should have received the fixed IP above
            out = subprocess.check_output(['ip', '-4', 'a', 'show', 'dev', self.iface])
            self.assertRegex(out, b'inet 192.168.5.210/24 .* scope global dynamic')
            # should have set transient hostname in hostnamed; this is
            # sometimes a bit lagging (issue #4753), so retry a few times
            for retry in range(1, 6):
                out = subprocess.check_output(['hostnamectl'])
                if b'testgreen' in out:
                    break
                time.sleep(5)
                sys.stdout.write('[retry %i] ' % retry)
                sys.stdout.flush()
            else:
                self.fail('Transient hostname not found in hostnamectl:\n{}'.format(out.decode()))
            # and also applied to the system
            self.assertEqual(socket.gethostname(), 'testgreen')
        except AssertionError:
            self.show_journal('systemd-networkd.service')
            self.show_journal('systemd-hostnamed.service')
            self.print_server_log()
            raise

    def test_transient_hostname_with_static(self):
        '''transient hostname is not applied if static hostname exists'''
        orig_hostname = socket.gethostname()
        self.addCleanup(socket.sethostname, orig_hostname)
        # ensure a static hostname "foobarqux" exists for the duration of the test
        if not os.path.exists('/etc/hostname'):
            self.write_config('/etc/hostname', "foobarqux")
        else:
            self.write_config('/run/hostname.tmp', "foobarqux")
            subprocess.check_call(['mount', '--bind', '/run/hostname.tmp', '/etc/hostname'])
            self.addCleanup(subprocess.call, ['umount', '/etc/hostname'])
        socket.sethostname("foobarqux");
        subprocess.check_call(['systemctl', 'stop', 'systemd-hostnamed.service'])
        self.addCleanup(subprocess.call, ['systemctl', 'stop', 'systemd-hostnamed.service'])
        self.create_iface(dnsmasq_opts=['--dhcp-host={},192.168.5.210,testgreen'.format(self.iface_mac)])
        self.do_test(coldplug=None, extra_opts='IPv6AcceptRA=False', dhcp_mode='ipv4')
        try:
            # should have received the fixed IP above
            out = subprocess.check_output(['ip', '-4', 'a', 'show', 'dev', self.iface])
            self.assertRegex(out, b'inet 192.168.5.210/24 .* scope global dynamic')
            # static hostname wins over transient one, thus *not* applied
            self.assertEqual(socket.gethostname(), "foobarqux")
        except AssertionError:
            self.show_journal('systemd-networkd.service')
            self.show_journal('systemd-hostnamed.service')
            self.print_server_log()
            raise
class NetworkdClientTest(ClientTestBase, unittest.TestCase):
    '''Test networkd client against networkd server.

    The "router" side runs a second networkd instance in its own mount
    namespace (see create_iface()) which acts as the DHCP server for the
    client side under test.
    '''

    def setUp(self):
        super().setUp()
        self.dnsmasq = None

    def create_iface(self, ipv6=False, dhcpserver_opts=None):
        '''Create test interface with DHCP server behind it'''
        # run "router-side" networkd in own mount namespace to shield it from
        # "client-side" configuration and networkd
        (fd, script) = tempfile.mkstemp(prefix='networkd-router.sh')
        self.addCleanup(os.remove, script)
        with os.fdopen(fd, 'w+') as f:
            f.write('''\
#!/bin/sh
set -eu
mkdir -p /run/systemd/network
mkdir -p /run/systemd/netif
mount -t tmpfs none /run/systemd/network
mount -t tmpfs none /run/systemd/netif
[ ! -e /run/dbus ] || mount -t tmpfs none /run/dbus
# create router/client veth pair
cat << EOF > /run/systemd/network/test.netdev
[NetDev]
Name=%(ifr)s
Kind=veth
[Peer]
Name=%(ifc)s
EOF
cat << EOF > /run/systemd/network/test.network
[Match]
Name=%(ifr)s
[Network]
Address=192.168.5.1/24
%(addr6)s
DHCPServer=yes
[DHCPServer]
PoolOffset=10
PoolSize=50
DNS=192.168.5.1
%(dhopts)s
EOF
# run networkd as in systemd-networkd.service
exec $(systemctl cat systemd-networkd.service | sed -n '/^ExecStart=/ { s/^.*=//; s/^[@+-]//; s/^!*//; p}')
''' % {'ifr': self.if_router, 'ifc': self.iface, 'addr6': ipv6 and 'Address=2600::1/64' or '',
       'dhopts': dhcpserver_opts or ''})
            os.fchmod(fd, 0o755)

        # hide the host's networkd configuration from the router instance
        subprocess.check_call(['systemd-run', '--unit=networkd-test-router.service',
                               '-p', 'InaccessibleDirectories=-/etc/systemd/network',
                               '-p', 'InaccessibleDirectories=-/run/systemd/network',
                               '-p', 'InaccessibleDirectories=-/run/systemd/netif',
                               '--service-type=notify', script])

        # wait until devices got created
        for _ in range(50):
            out = subprocess.check_output(['ip', 'a', 'show', 'dev', self.if_router])
            if b'state UP' in out and b'scope global' in out:
                break
            time.sleep(0.1)

    def shutdown_iface(self):
        '''Remove test interface and stop DHCP server'''
        if self.if_router:
            subprocess.check_call(['systemctl', 'stop', 'networkd-test-router.service'])
            # ensure failed transient unit does not stay around
            subprocess.call(['systemctl', 'reset-failed', 'networkd-test-router.service'])
            subprocess.call(['ip', 'link', 'del', 'dev', self.if_router])
            self.if_router = None

    def print_server_log(self):
        '''Print DHCP server log for debugging failures'''
        self.show_journal('networkd-test-router.service')

    @unittest.skip('networkd does not have DHCPv6 server support')
    def test_hotplug_dhcp_ip6(self):
        pass

    @unittest.skip('networkd does not have DHCPv6 server support')
    def test_coldplug_dhcp_ip6(self):
        pass

    def test_search_domains(self):
        '''Excess search domains are truncated with a warning comment.'''
        # we don't use this interface for this test
        self.if_router = None

        self.write_network('test.netdev', '''\
[NetDev]
Name=dummy0
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('test.network', '''\
[Match]
Name=dummy0
[Network]
Address=192.168.42.100/24
DNS=192.168.42.1
Domains= one two three four five six seven eight nine ten''')

        self.start_unit('systemd-networkd')

        # poll until our domains show up in resolv.conf (max ~5 s)
        for timeout in range(50):
            with open(RESOLV_CONF) as f:
                contents = f.read()
            if ' one' in contents:
                break
            time.sleep(0.1)
        self.assertRegex(contents, 'search .*one two three four')
        self.assertNotIn('seven\n', contents)
        self.assertIn('# Too many search domains configured, remaining ones ignored.\n', contents)

    def test_search_domains_too_long(self):
        '''An over-long search domain list is truncated with a warning comment.'''
        # we don't use this interface for this test
        self.if_router = None

        name_prefix = 'a' * 60

        self.write_network('test.netdev', '''\
[NetDev]
Name=dummy0
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('test.network', '''\
[Match]
Name=dummy0
[Network]
Address=192.168.42.100/24
DNS=192.168.42.1
Domains={p}0 {p}1 {p}2 {p}3 {p}4'''.format(p=name_prefix))

        self.start_unit('systemd-networkd')

        # poll until our domains show up in resolv.conf (max ~5 s).
        # BUGFIX: this previously waited for the sentinel ' one' (copied from
        # test_search_domains), which never occurs here, so the loop always
        # slept the full 5 seconds; wait for our actual domain prefix instead.
        for timeout in range(50):
            with open(RESOLV_CONF) as f:
                contents = f.read()
            if name_prefix in contents:
                break
            time.sleep(0.1)
        self.assertRegex(contents, 'search .*{p}0 {p}1 {p}2'.format(p=name_prefix))
        self.assertIn('# Total length of all search domains is too long, remaining ones ignored.', contents)

    def test_dropin(self):
        '''.network drop-in settings are merged with the main file.'''
        # we don't use this interface for this test
        self.if_router = None

        self.write_network('test.netdev', '''\
[NetDev]
Name=dummy0
Kind=dummy
MACAddress=12:34:56:78:9a:bc''')
        self.write_network('test.network', '''\
[Match]
Name=dummy0
[Network]
Address=192.168.42.100/24
DNS=192.168.42.1''')
        self.write_network_dropin('test.network', 'dns', '''\
[Network]
DNS=127.0.0.1''')

        self.start_unit('systemd-resolved')
        self.start_unit('systemd-networkd')

        # both the main file's and the drop-in's DNS server must appear
        for timeout in range(50):
            with open(RESOLV_CONF) as f:
                contents = f.read()
            if ' 127.0.0.1' in contents and '192.168.42.1' in contents:
                break
            time.sleep(0.1)
        self.assertIn('nameserver 192.168.42.1\n', contents)
        self.assertIn('nameserver 127.0.0.1\n', contents)

    def test_dhcp_timezone(self):
        '''networkd sets time zone from DHCP'''

        def get_tz():
            # read the current zone from timedated over D-Bus
            out = subprocess.check_output(['busctl', 'get-property', 'org.freedesktop.timedate1',
                                           '/org/freedesktop/timedate1', 'org.freedesktop.timedate1', 'Timezone'])
            assert out.startswith(b's "')
            out = out.strip()
            assert out.endswith(b'"')
            return out[3:-1].decode()

        orig_timezone = get_tz()
        self.addCleanup(subprocess.call, ['timedatectl', 'set-timezone', orig_timezone])

        self.create_iface(dhcpserver_opts='EmitTimezone=yes\nTimezone=Pacific/Honolulu')
        self.do_test(coldplug=None, extra_opts='IPv6AcceptRA=false\n[DHCP]\nUseTimezone=true', dhcp_mode='ipv4')

        # should have applied the received timezone
        try:
            self.assertEqual(get_tz(), 'Pacific/Honolulu')
        except AssertionError:
            self.show_journal('systemd-networkd.service')
            self.show_journal('systemd-hostnamed.service')
            raise
class MatchClientTest(unittest.TestCase, NetworkdTestingUtilities):
    """Test [Match] sections in .network files.

    Be aware that matching the test host's interfaces will wipe their
    configuration, so as a precaution, all network files should have a
    restrictive [Match] section to only ever interfere with the
    temporary veth interfaces created here.
    """

    def tearDown(self):
        """Stop networkd."""
        subprocess.call(['systemctl', 'stop', 'systemd-networkd'])

    def test_basic_matching(self):
        """Verify the Name= line works throughout this class."""
        self.add_veth_pair('test_if1', 'fake_if2')
        # only the glob "test_*" matches; the peer must stay unmanaged
        self.write_network('test.network', "[Match]\nName=test_*\n[Network]")
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.assert_link_states(test_if1='managed', fake_if2='unmanaged')

    def test_inverted_matching(self):
        """Verify that a '!'-prefixed value inverts the match."""
        # Use a MAC address as the interfaces' common matching attribute
        # to avoid depending on udev, to support testing in containers.
        mac = '00:01:02:03:98:99'
        self.add_veth_pair('test_veth', 'test_peer',
                           ['addr', mac], ['addr', mac])
        # both links share the MAC, but "!... *peer*" excludes the peer
        self.write_network('no-veth.network', """\
[Match]
MACAddress={}
Name=!nonexistent *peer*
[Network]""".format(mac))
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.assert_link_states(test_veth='managed', test_peer='unmanaged')
class UnmanagedClientTest(unittest.TestCase, NetworkdTestingUtilities):
    """Test if networkd manages the correct interfaces."""

    def setUp(self):
        """Write .network files to match the named veth devices."""
        # Define the veth+peer pairs to be created.
        # Their pairing doesn't actually matter, only their names do.
        self.veths = {
            'm1def': 'm0unm',
            'm1man': 'm1unm',
        }
        # Define the contents of .network files to be read in order.
        # m1def: matched with no Unmanaged= setting (defaults to managed);
        # m1unm: explicitly unmanaged; m1*: re-enables management, but the
        # earlier (lower-numbered) m1unm file wins for that interface.
        self.configs = (
            "[Match]\nName=m1def\n",
            "[Match]\nName=m1unm\n[Link]\nUnmanaged=yes\n",
            "[Match]\nName=m1*\n[Link]\nUnmanaged=no\n",
        )
        # Write out the .network files to be cleaned up automatically.
        for i, config in enumerate(self.configs):
            self.write_network("%02d-test.network" % i, config)

    def tearDown(self):
        """Stop networkd."""
        subprocess.call(['systemctl', 'stop', 'systemd-networkd'])

    def create_iface(self):
        """Create temporary veth pairs for interface matching."""
        for veth, peer in self.veths.items():
            self.add_veth_pair(veth, peer)

    def test_unmanaged_setting(self):
        """Verify link states with Unmanaged= settings, hot-plug."""
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.create_iface()
        self.assert_link_states(m1def='managed',
                                m1man='managed',
                                m1unm='unmanaged',
                                m0unm='unmanaged')

    def test_unmanaged_setting_coldplug(self):
        """Verify link states with Unmanaged= settings, cold-plug."""
        self.create_iface()
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.assert_link_states(m1def='managed',
                                m1man='managed',
                                m1unm='unmanaged',
                                m0unm='unmanaged')

    def test_catchall_config(self):
        """Verify link states with a catch-all config, hot-plug."""
        # Don't actually catch ALL interfaces.  It messes up the host.
        self.write_network('all.network', "[Match]\nName=m[01]???\n")
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.create_iface()
        self.assert_link_states(m1def='managed',
                                m1man='managed',
                                m1unm='unmanaged',
                                m0unm='managed')

    def test_catchall_config_coldplug(self):
        """Verify link states with a catch-all config, cold-plug."""
        # Don't actually catch ALL interfaces.  It messes up the host.
        self.write_network('all.network', "[Match]\nName=m[01]???\n")
        self.create_iface()
        subprocess.check_call(['systemctl', 'start', 'systemd-networkd'])
        self.assert_link_states(m1def='managed',
                                m1man='managed',
                                m1unm='unmanaged',
                                m0unm='managed')
if __name__ == '__main__':
    # Stream results to stdout so test output interleaves with the journal
    # dumps the tests print on failure.
    runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
    unittest.main(testRunner=runner)
|
gpl-2.0
|
tigersirvine/occtigerscricket
|
django/contrib/syndication/views.py
|
85
|
7622
|
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.utils.html import escape
from django.utils.timezone import is_naive
def add_domain(domain, url, secure=False):
    """Return *url* as an absolute URL, prefixing scheme/domain if needed.

    Already-absolute http://, https:// and mailto: URLs pass through
    unchanged; network-path references (//host/...) get only a protocol.
    """
    protocol = 'https' if secure else 'http'
    if url.startswith('//'):
        # Support network-path reference (see #16753) - RSS requires a protocol
        return '%s:%s' % (protocol, url)
    if url.startswith('http://') or url.startswith('https://') or url.startswith('mailto:'):
        return url
    # 'url' must already be ASCII and URL-quoted, so no need for encoding
    # conversions here.
    return iri_to_uri(u'%s://%s%s' % (protocol, domain, url))
class FeedDoesNotExist(ObjectDoesNotExist):
    """Raised when the object a feed is based on cannot be found."""
    pass
class Feed(object):
    """Base class for syndication feed views (Python 2 era Django).

    Subclasses define items()/item_title()/item_link() etc.; the instance is
    used directly as a view callable via __call__.
    """
    # feed generator class used to render the output (RSS by default)
    feed_type = feedgenerator.DefaultFeed
    # optional template names for per-item title/description rendering
    title_template = None
    description_template = None

    def __call__(self, request, *args, **kwargs):
        """View entry point: build the feed and write it to the response."""
        try:
            obj = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            raise Http404('Feed object does not exist.')
        feedgen = self.get_feed(obj, request)
        response = HttpResponse(content_type=feedgen.mime_type)
        feedgen.write(response, 'utf-8')
        return response

    def item_title(self, item):
        # Titles should be double escaped by default (see #6533)
        return escape(force_unicode(item))

    def item_description(self, item):
        return force_unicode(item)

    def item_link(self, item):
        """Default item link: the item's own get_absolute_url()."""
        try:
            return item.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)

    def __get_dynamic_attr(self, attname, obj, default=None):
        """Resolve *attname* on self: plain value, or callable taking (obj) or ()."""
        try:
            attr = getattr(self, attname)
        except AttributeError:
            return default
        if callable(attr):
            # Check func_code.co_argcount rather than try/excepting the
            # function and catching the TypeError, because something inside
            # the function may raise the TypeError. This technique is more
            # accurate.
            if hasattr(attr, 'func_code'):
                argcount = attr.func_code.co_argcount
            else:
                argcount = attr.__call__.func_code.co_argcount
            if argcount == 2: # one argument is 'self'
                return attr(obj)
            else:
                return attr()
        return attr

    def feed_extra_kwargs(self, obj):
        """
        Returns an extra keyword arguments dictionary that is used when
        initializing the feed generator.
        """
        return {}

    def item_extra_kwargs(self, item):
        """
        Returns an extra keyword arguments dictionary that is used with
        the `add_item` call of the feed generator.
        """
        return {}

    def get_object(self, request, *args, **kwargs):
        return None

    def get_feed(self, obj, request):
        """
        Returns a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raises FeedDoesNotExist for invalid parameters.
        """
        current_site = get_current_site(request)

        link = self.__get_dynamic_attr('link', obj)
        link = add_domain(current_site.domain, link, request.is_secure())

        # NOTE(review): .decode() implies LANGUAGE_CODE is a byte string here
        # (Python 2); this call would fail on a unicode setting -- confirm.
        feed = self.feed_type(
            title = self.__get_dynamic_attr('title', obj),
            subtitle = self.__get_dynamic_attr('subtitle', obj),
            link = link,
            description = self.__get_dynamic_attr('description', obj),
            language = settings.LANGUAGE_CODE.decode(),
            feed_url = add_domain(
                current_site.domain,
                self.__get_dynamic_attr('feed_url', obj) or request.path,
                request.is_secure(),
            ),
            author_name = self.__get_dynamic_attr('author_name', obj),
            author_link = self.__get_dynamic_attr('author_link', obj),
            author_email = self.__get_dynamic_attr('author_email', obj),
            categories = self.__get_dynamic_attr('categories', obj),
            feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
            feed_guid = self.__get_dynamic_attr('feed_guid', obj),
            ttl = self.__get_dynamic_attr('ttl', obj),
            **self.feed_extra_kwargs(obj)
        )

        # pre-load the optional per-item templates once, outside the item loop
        title_tmp = None
        if self.title_template is not None:
            try:
                title_tmp = loader.get_template(self.title_template)
            except TemplateDoesNotExist:
                pass
        description_tmp = None
        if self.description_template is not None:
            try:
                description_tmp = loader.get_template(self.description_template)
            except TemplateDoesNotExist:
                pass

        for item in self.__get_dynamic_attr('items', obj):
            # template rendering wins over item_title()/item_description()
            if title_tmp is not None:
                title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
            else:
                title = self.__get_dynamic_attr('item_title', item)
            if description_tmp is not None:
                description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
            else:
                description = self.__get_dynamic_attr('item_description', item)
            link = add_domain(
                current_site.domain,
                self.__get_dynamic_attr('item_link', item),
                request.is_secure(),
            )
            enc = None
            enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
            if enc_url:
                enc = feedgenerator.Enclosure(
                    url = smart_unicode(enc_url),
                    length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
                    mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
                )
            author_name = self.__get_dynamic_attr('item_author_name', item)
            if author_name is not None:
                author_email = self.__get_dynamic_attr('item_author_email', item)
                author_link = self.__get_dynamic_attr('item_author_link', item)
            else:
                author_email = author_link = None

            # make naive pubdates timezone-aware using the local timezone
            pubdate = self.__get_dynamic_attr('item_pubdate', item)
            if pubdate and is_naive(pubdate):
                ltz = tzinfo.LocalTimezone(pubdate)
                pubdate = pubdate.replace(tzinfo=ltz)

            feed.add_item(
                title = title,
                link = link,
                description = description,
                unique_id = self.__get_dynamic_attr('item_guid', item, link),
                enclosure = enc,
                pubdate = pubdate,
                author_name = author_name,
                author_email = author_email,
                author_link = author_link,
                categories = self.__get_dynamic_attr('item_categories', item),
                item_copyright = self.__get_dynamic_attr('item_copyright', item),
                **self.item_extra_kwargs(item)
            )
        return feed
|
bsd-3-clause
|
wisechengyi/pants
|
src/python/pants/backend/jvm/targets/java_library.py
|
2
|
1370
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.junit_tests import JUnitTests
class JavaLibrary(ExportableJvmLibrary):
    """A Java library.

    Normally has conceptually-related sources; invoking the ``compile`` goal
    on this target compiles Java and generates classes. Invoking the ``jar``
    goal on this target creates a ``.jar``; but that's an unusual thing to do.
    Instead, a ``jvm_binary`` might depend on this library; that binary is a
    more sensible thing to bundle.

    :API: public
    """

    default_sources_globs = "*.java"
    default_sources_exclude_globs = JUnitTests.java_test_globs

    @classmethod
    def subsystems(cls):
        return super().subsystems()

    def __init__(self, address=None, **kwargs):
        super().__init__(address=address, **kwargs)
        # This target shares machinery with the Scala library targets, but the
        # scalac-specific keyword arguments are meaningless here; reject them.
        for scala_only_arg in ("scalac_plugins", "scalac_plugin_args"):
            if scala_only_arg in kwargs:
                raise self.IllegalArgument(
                    address.spec,
                    "java_library does not support the {} argument.".format(scala_only_arg),
                )
apache-2.0
|
smunaut/gnuradio
|
gr-audio/examples/python/audio_fft.py
|
68
|
4596
|
#!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, audio
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, waterfallsink2, scopesink2, form, slider
from optparse import OptionParser
import wx
import sys
class app_top_block(stdgui2.std_top_block):
    """wxGUI flow graph: audio source connected to an FFT, waterfall, or
    oscilloscope display, selected by command-line options."""

    def __init__(self, frame, panel, vbox, argv):
        stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)

        self.frame = frame
        self.panel = panel

        parser = OptionParser(option_class=eng_option)
        parser.add_option("-W", "--waterfall", action="store_true", default=False,
                          help="Enable waterfall display")
        parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
                          help="Enable oscilloscope display")
        parser.add_option("-I", "--audio-input", type="string", default="",
                          help="pcm input device name.  E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args()
        sample_rate = int(options.sample_rate)

        # this script takes no positional arguments
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        self.show_debug_info = True

        # build the graph: pick the sink type requested on the command line
        if options.waterfall:
            self.scope = \
              waterfallsink2.waterfall_sink_f (panel, fft_size=1024, sample_rate=sample_rate)
        elif options.oscilloscope:
            self.scope = scopesink2.scope_sink_f(panel, sample_rate=sample_rate)
        else:
            self.scope = fftsink2.fft_sink_f (panel, fft_size=1024, sample_rate=sample_rate, fft_rate=30,
                                              ref_scale=1.0, ref_level=0, y_divs=12)

        # audio source feeds the chosen display sink directly
        self.src = audio.source (sample_rate, options.audio_input)

        self.connect(self.src, self.scope)

        self._build_gui(vbox)

        # set initial values

    def _set_status_msg(self, msg):
        """Show *msg* in the frame's status bar."""
        self.frame.GetStatusBar().SetStatusText(msg, 0)

    def _build_gui(self, vbox):
        """Add the display widget to the main vertical sizer."""

        def _form_set_freq(kv):
            return self.set_freq(kv['freq'])

        vbox.Add(self.scope.win, 10, wx.EXPAND)

        #self._build_subpanel(vbox)

    def _build_subpanel(self, vbox_arg):
        # build a secondary information panel (sometimes hidden)

        # FIXME figure out how to have this be a subpanel that is always
        # created, but has its visibility controlled by foo.Show(True/False)

        # NOTE(review): this method is currently dead code (the call above is
        # commented out) and reads self.myform, which is never assigned in
        # this class -- it would raise AttributeError if re-enabled as-is.

        def _form_set_decim(kv):
            return self.set_decim(kv['decim'])

        if not(self.show_debug_info):
            return

        panel = self.panel
        vbox = vbox_arg
        myform = self.myform

        #panel = wx.Panel(self.panel, -1)
        #vbox = wx.BoxSizer(wx.VERTICAL)

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)

        myform['decim'] = form.int_field(
            parent=panel, sizer=hbox, label="Decim",
            callback=myform.check_input_and_call(_form_set_decim, self._set_status_msg))

        hbox.Add((5,0), 1)
        myform['fs@usb'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Fs@USB")

        hbox.Add((5,0), 1)
        myform['dbname'] = form.static_text_field(
            parent=panel, sizer=hbox)

        hbox.Add((5,0), 1)
        myform['baseband'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Analog BB")

        hbox.Add((5,0), 1)
        myform['ddc'] = form.static_float_field(
            parent=panel, sizer=hbox, label="DDC")

        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)
def main ():
    """Create the stdgui2 application wrapping app_top_block and run it."""
    application = stdgui2.stdapp(app_top_block, "Audio FFT", nstatus=1)
    application.MainLoop()


if __name__ == '__main__':
    main ()
|
gpl-3.0
|
rven/odoo
|
addons/sale/tests/test_onchange.py
|
5
|
12358
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import Form
from odoo.tests.common import TransactionCase
class TestOnchangeProductId(TransactionCase):
    """Test that when an included tax is mapped by a fiscal position, the included tax must be
    subtracted to the price of the product.
    """

    def setUp(self):
        # Cache frequently used model registries as attributes so the
        # individual tests read more concisely.
        super(TestOnchangeProductId, self).setUp()
        self.fiscal_position_model = self.env['account.fiscal.position']
        self.fiscal_position_tax_model = self.env['account.fiscal.position.tax']
        self.tax_model = self.env['account.tax']
        self.so_model = self.env['sale.order']
        self.po_line_model = self.env['sale.order.line']
        self.res_partner_model = self.env['res.partner']
        self.product_tmpl_model = self.env['product.template']
        self.product_model = self.env['product.product']
        self.product_uom_model = self.env['uom.uom']
        self.supplierinfo_model = self.env["product.supplierinfo"]
        self.pricelist_model = self.env['product.pricelist']

    def test_onchange_product_id(self):
        """A 21% price-included tax mapped to a 0% excluded tax by a fiscal
        position must be stripped from the unit price (121 -> 100)."""
        uom_id = self.product_uom_model.search([('name', '=', 'Units')])[0]
        pricelist = self.pricelist_model.search([('name', '=', 'Public Pricelist')])[0]

        partner_id = self.res_partner_model.create(dict(name="George"))
        tax_include_id = self.tax_model.create(dict(name="Include tax",
                                                    amount='21.00',
                                                    price_include=True,
                                                    type_tax_use='sale'))
        tax_exclude_id = self.tax_model.create(dict(name="Exclude tax",
                                                    amount='0.00',
                                                    type_tax_use='sale'))

        product_tmpl_id = self.product_tmpl_model.create(dict(name="Voiture",
                                                              list_price=121,
                                                              taxes_id=[(6, 0, [tax_include_id.id])]))

        product_id = product_tmpl_id.product_variant_id

        fp_id = self.fiscal_position_model.create(dict(name="fiscal position", sequence=1))

        fp_tax_id = self.fiscal_position_tax_model.create(dict(position_id=fp_id.id,
                                                               tax_src_id=tax_include_id.id,
                                                               tax_dest_id=tax_exclude_id.id))

        # Create the SO with one SO line and apply a pricelist and fiscal position on it
        order_form = Form(self.env['sale.order'].with_context(tracking_disable=True))
        order_form.partner_id = partner_id
        order_form.pricelist_id = pricelist
        order_form.fiscal_position_id = fp_id
        with order_form.order_line.new() as line:
            line.name = product_id.name
            line.product_id = product_id
            line.product_uom_qty = 1.0
            line.product_uom = uom_id
        sale_order = order_form.save()

        # Check the unit price of SO line
        self.assertEqual(100, sale_order.order_line[0].price_unit, "The included tax must be subtracted to the price")

    def test_pricelist_application(self):
        """ Test different prices are correctly applied based on dates """
        support_product = self.env['product.product'].create({
            'name': 'Virtual Home Staging',
            'list_price': 100,
        })
        partner = self.res_partner_model.create(dict(name="George"))

        # Two date-bounded percentage rules: 20% before Christmas, 50% after.
        christmas_pricelist = self.env['product.pricelist'].create({
            'name': 'Christmas pricelist',
            'item_ids': [(0, 0, {
                'date_start': "2017-12-01",
                'date_end': "2017-12-24",
                'compute_price': 'percentage',
                'base': 'list_price',
                'percent_price': 20,
                'applied_on': '3_global',
                'name': 'Pre-Christmas discount'
            }), (0, 0, {
                'date_start': "2017-12-25",
                'date_end': "2017-12-31",
                'compute_price': 'percentage',
                'base': 'list_price',
                'percent_price': 50,
                'applied_on': '3_global',
                'name': 'Post-Christmas super-discount'
            })]
        })

        # Create the SO with pricelist based on date
        order_form = Form(self.env['sale.order'].with_context(tracking_disable=True))
        order_form.partner_id = partner
        order_form.date_order = '2017-12-20'
        order_form.pricelist_id = christmas_pricelist
        with order_form.order_line.new() as line:
            line.product_id = support_product
        so = order_form.save()

        # Check the unit price and subtotal of SO line
        self.assertEqual(so.order_line[0].price_unit, 80, "First date pricelist rule not applied")

        self.assertEqual(so.order_line[0].price_subtotal, so.order_line[0].price_unit * so.order_line[0].product_uom_qty, 'Total of SO line should be a multiplication of unit price and ordered quantity')

        # Change order date of the SO and check the unit price and subtotal of SO line
        with Form(so) as order:
            order.date_order = '2017-12-30'
            with order.order_line.edit(0) as line:
                line.product_id = support_product

        self.assertEqual(so.order_line[0].price_unit, 50, "Second date pricelist rule not applied")

        self.assertEqual(so.order_line[0].price_subtotal, so.order_line[0].price_unit * so.order_line[0].product_uom_qty, 'Total of SO line should be a multiplication of unit price and ordered quantity')

    def test_pricelist_uom_discount(self):
        """ Test prices and discounts are correctly applied based on date and uom"""
        computer_case = self.env['product.product'].create({
            'name': 'Drawer Black',
            'list_price': 100,
        })
        partner = self.res_partner_model.create(dict(name="George"))
        categ_unit_id = self.ref('uom.product_uom_categ_unit')
        goup_discount_id = self.ref('product.group_discount_per_so_line')
        # Grant the discount-per-line group so the discount field is used.
        self.env.user.write({'groups_id': [(4, goup_discount_id, 0)]})
        new_uom = self.env['uom.uom'].create({
            'name': '10 units',
            'factor_inv': 10,
            'uom_type': 'bigger',
            'rounding': 1.0,
            'category_id': categ_unit_id
        })
        christmas_pricelist = self.env['product.pricelist'].create({
            'name': 'Christmas pricelist',
            'discount_policy': 'without_discount',
            'item_ids': [(0, 0, {
                'date_start': "2017-12-01",
                'date_end': "2017-12-30",
                'compute_price': 'percentage',
                'base': 'list_price',
                'percent_price': 10,
                'applied_on': '3_global',
                'name': 'Christmas discount'
            })]
        })

        so = self.env['sale.order'].create({
            'partner_id': partner.id,
            'date_order': '2017-12-20',
            'pricelist_id': christmas_pricelist.id,
        })

        order_line = self.env['sale.order.line'].new({
            'order_id': so.id,
            'product_id': computer_case.id,
        })

        # force compute uom and prices
        order_line.product_id_change()

        order_line.product_uom_change()
        order_line._onchange_discount()
        self.assertEqual(order_line.price_subtotal, 90, "Christmas discount pricelist rule not applied")
        self.assertEqual(order_line.discount, 10, "Christmas discount not equalt to 10%")

        # Switching to the 10-pack UoM scales the subtotal by the UoM factor.
        order_line.product_uom = new_uom
        order_line.product_uom_change()
        order_line._onchange_discount()
        self.assertEqual(order_line.price_subtotal, 900, "Christmas discount pricelist rule not applied")
        self.assertEqual(order_line.discount, 10, "Christmas discount not equalt to 10%")

    def test_pricelist_based_on_other(self):
        """ Test price and discount are correctly applied with a pricelist based on an other one"""
        computer_case = self.env['product.product'].create({
            'name': 'Drawer Black',
            'list_price': 100,
        })
        partner = self.res_partner_model.create(dict(name="George"))
        goup_discount_id = self.ref('product.group_discount_per_so_line')
        self.env.user.write({'groups_id': [(4, goup_discount_id, 0)]})

        # First pricelist: flat 10% off the list price.
        first_pricelist = self.env['product.pricelist'].create({
            'name': 'First pricelist',
            'discount_policy': 'without_discount',
            'item_ids': [(0, 0, {
                'compute_price': 'percentage',
                'base': 'list_price',
                'percent_price': 10,
                'applied_on': '3_global',
                'name': 'First discount'
            })]
        })

        # Second pricelist: a further 10% off the first pricelist's price,
        # i.e. 100 * 0.9 * 0.9 = 81 (displayed as a 19% discount).
        second_pricelist = self.env['product.pricelist'].create({
            'name': 'Second pricelist',
            'discount_policy': 'without_discount',
            'item_ids': [(0, 0, {
                'compute_price': 'formula',
                'base': 'pricelist',
                'base_pricelist_id': first_pricelist.id,
                'price_discount': 10,
                'applied_on': '3_global',
                'name': 'Second discount'
            })]
        })

        so = self.env['sale.order'].create({
            'partner_id': partner.id,
            'date_order': '2018-07-11',
            'pricelist_id': second_pricelist.id,
        })

        order_line = self.env['sale.order.line'].new({
            'order_id': so.id,
            'product_id': computer_case.id,
        })

        # force compute uom and prices
        order_line.product_id_change()
        order_line._onchange_discount()

        self.assertEqual(order_line.price_subtotal, 81, "Second pricelist rule not applied")

        self.assertEqual(order_line.discount, 19, "Second discount not applied")

    def test_pricelist_with_other_currency(self):
        """ Test prices are correctly applied with a pricelist with an other currency"""
        computer_case = self.env['product.product'].create({
            'name': 'Drawer Black',
            'list_price': 100,
        })
        computer_case.list_price = 100
        partner = self.res_partner_model.create(dict(name="George"))
        categ_unit_id = self.ref('uom.product_uom_categ_unit')
        # Secondary currency at a fixed 2:1 rate; remove company-currency
        # rates so the conversion is unambiguous.
        other_currency = self.env['res.currency'].create({'name': 'other currency',
                                                          'symbol': 'other'})
        self.env['res.currency.rate'].create({'name': '2018-07-11',
                                              'rate': 2.0,
                                              'currency_id': other_currency.id,
                                              'company_id': self.env.company.id})
        self.env['res.currency.rate'].search(
            [('currency_id', '=', self.env.company.currency_id.id)]
        ).unlink()
        new_uom = self.env['uom.uom'].create({
            'name': '10 units',
            'factor_inv': 10,
            'uom_type': 'bigger',
            'rounding': 1.0,
            'category_id': categ_unit_id
        })

        # This pricelist doesn't show the discount
        first_pricelist = self.env['product.pricelist'].create({
            'name': 'First pricelist',
            'currency_id': other_currency.id,
            'discount_policy': 'with_discount',
            'item_ids': [(0, 0, {
                'compute_price': 'percentage',
                'base': 'list_price',
                'percent_price': 10,
                'applied_on': '3_global',
                'name': 'First discount'
            })]
        })

        so = self.env['sale.order'].create({
            'partner_id': partner.id,
            'date_order': '2018-07-12',
            'pricelist_id': first_pricelist.id,
        })

        order_line = self.env['sale.order.line'].new({
            'order_id': so.id,
            'product_id': computer_case.id,
        })

        # force compute uom and prices
        order_line.product_id_change()

        # 100 * 0.9 (discount) * 2 (currency rate) = 180.
        self.assertEqual(order_line.price_unit, 180, "First pricelist rule not applied")

        order_line.product_uom = new_uom
        order_line.product_uom_change()
        self.assertEqual(order_line.price_unit, 1800, "First pricelist rule not applied")
|
agpl-3.0
|
garg10may/youtube-dl
|
youtube_dl/extractor/techtalks.py
|
170
|
2432
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
get_element_by_attribute,
clean_html,
)
class TechTalksIE(InfoExtractor):
    """Extractor for techtalks.tv talk pages.

    Each talk page hosts an RTMP stream for the presenter video and,
    optionally, a second stream with the slides; when slides exist the
    result is a two-entry playlist.
    """
    _VALID_URL = r'https?://techtalks\.tv/talks/[^/]*/(?P<id>\d+)/'

    _TEST = {
        'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
        'info_dict': {
            'id': '57758',
            'title': 'Learning Topic Models --- Going beyond SVD',
        },
        'playlist': [
            {
                'info_dict': {
                    'id': '57758',
                    'ext': 'flv',
                    'title': 'Learning Topic Models --- Going beyond SVD',
                },
            },
            {
                'info_dict': {
                    'id': '57758-slides',
                    'ext': 'flv',
                    'title': 'Learning Topic Models --- Going beyond SVD',
                },
            },
        ],
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        """Scrape the RTMP connection URL, play paths and title from the page."""
        mobj = re.match(self._VALID_URL, url)
        talk_id = mobj.group('id')
        webpage = self._download_webpage(url, talk_id)
        # The Flowplayer config embeds the RTMP server URL and per-stream
        # play paths directly in the page's JavaScript/markup.
        rtmp_url = self._search_regex(
            r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url')
        play_path = self._search_regex(
            r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
            webpage, 'presenter play path')
        title = clean_html(get_element_by_attribute('class', 'title', webpage))
        video_info = {
            'id': talk_id,
            'title': title,
            'url': rtmp_url,
            'play_path': play_path,
            'ext': 'flv',
        }
        m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
        if m_slides is None:
            # No slides stream: return the single presenter video.
            return video_info
        else:
            return {
                '_type': 'playlist',
                'id': talk_id,
                'title': title,
                'entries': [
                    video_info,
                    # The slides video
                    {
                        'id': talk_id + '-slides',
                        'title': title,
                        'url': rtmp_url,
                        'play_path': m_slides.group(1),
                        'ext': 'flv',
                    },
                ],
            }
|
unlicense
|
Tokyo-Buffalo/tokyosouth
|
env/lib/python3.6/site-packages/twisted/python/test/test_appdirs.py
|
16
|
1081
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the data directory support.
"""
from __future__ import division, absolute_import
try:
from twisted.python import _appdirs
except ImportError:
_appdirs = None
from twisted.trial import unittest
class AppdirsTests(unittest.TestCase):
    """
    Tests for L{_appdirs}.
    """

    # Skip the whole class when the optional appdirs dependency (and thus
    # twisted.python._appdirs) is unavailable.
    if not _appdirs:
        skip = "appdirs package not installed"

    def test_moduleName(self):
        """
        Calling L{appdirs.getDataDirectory} will return a user data directory
        in the system convention, with the module of the caller as the
        subdirectory.
        """
        res = _appdirs.getDataDirectory()
        self.assertTrue(res.endswith("twisted.python.test.test_appdirs"))

    def test_manual(self):
        """
        Calling L{appdirs.getDataDirectory} with a C{moduleName} argument will
        make a data directory with that name instead.
        """
        res = _appdirs.getDataDirectory("foo.bar.baz")
        self.assertTrue(res.endswith("foo.bar.baz"))
|
mit
|
barrywardell/git-module
|
GitSuperRepository.py
|
1
|
15137
|
# GitSuperRepository.py
#
# Copyright (C) 2011 Barry Wardell <barry.wardell@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this library; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
==========================
GitSuperRepository Package
==========================
The GitSuperRepository package provides the GitSuperRepository class, which
presents an Python interface to a git repository. This repository may contain
submodules which are stored upstream in various version control systems
including git, mercurial and svn.
"""
from __future__ import print_function
import pprint, sys, os, re
import tempfile
from subprocess import call, CalledProcessError
# subprocess.check_output is only available in newer Python versions
try:
    from subprocess import check_output
except ImportError:
    # Bug fix: the fallback referenced subprocess.Popen, but this module only
    # does "from subprocess import ..." — the bare name 'subprocess' was never
    # bound, so the emulation raised NameError when it was actually needed.
    import subprocess

    def check_output(x, bufsize=0, executable=None, stdin=None, stdout=None,
                     stderr=None, preexec_fn=None, close_fds=False, shell=False,
                     cwd=None, env=None, universal_newlines=False,
                     startupinfo=None, creationflags=0):
        """Emulate the check_output function provided in newer versions of Python.

        Runs command x, forcing stdout to a pipe (the stdout argument is
        ignored, matching the original behaviour), and returns the captured
        stdout bytes.
        """
        return subprocess.Popen(x, bufsize, executable, stdin, subprocess.PIPE,
                                stderr, preexec_fn, close_fds, shell, cwd, env,
                                universal_newlines, startupinfo,
                                creationflags).communicate()[0]
class GitSuperRepository():
    """
    Creating a GitSuperRepository object binds the object to a specific git
    repository.

    The bound repository is a git "super-repository": it must contain a
    .git directory and a .gitmodules file. Submodules may track upstream
    repositories in git, svn or mercurial.
    """
    def __init__(self, path=None):
        """
        Create a GitSuperRepository object to manage a git repository.

        The root of the git repository is assumed to be 'path'. If this is not
        specified, then it is assumed to be the current working directory.

        Raises ValueError when path is not a git super-repository.

        NOTE(review): despite the docstring, path=None is not actually
        supported — os.path.join(None, '.git') raises TypeError. Confirm
        intended behaviour before relying on the default.
        """
        self.__path = path
        self.__git_dir = os.path.join(path, '.git')
        self.__dot_gitmodules = os.path.join(self.__path, '.gitmodules')

        # Check we have a git repository
        if not os.path.isdir(self.__git_dir) or \
           not os.path.isfile(self.__dot_gitmodules):
            raise ValueError(self.__git_dir + ' is not a git super-repository')

    def __num_lines(self, test):
        """Count the number of lines in a string."""
        # NOTE(review): raises NameError/UnboundLocalError on an empty
        # string ('' still yields one chunk from split, so in practice i is
        # always bound, but only because split never returns an empty list).
        for i, l in enumerate(test.split('\n')):
            pass
        return i + 1

    def git_command(self, command, module=None, exceptions=True):
        """Execute a git command on the repository.

        command: list of git arguments (without the leading 'git').
        module:  when given, run the command inside that submodule instead.
        exceptions: when True, propagate CalledProcessError and return the
            command's stdout (stripped of the trailing newline).

        NOTE(review): when exceptions=False this method returns None even on
        success — the captured output is assigned but never returned.
        """
        if module == None:
            git_dir = '--git-dir=' + self.__git_dir
            work_tree = '--work-tree=' + self.__path
            if exceptions:
                return check_output(['git', git_dir, work_tree] + command, cwd=self.__path).decode('utf_8').rstrip('\n')
            else:
                try:
                    output = check_output(['git', git_dir, work_tree] + command, cwd=self.__path).decode('utf_8').rstrip('\n')
                except CalledProcessError as e:
                    print(e.output, end='')
        else:
            self.assert_is_submodule(module)
            module_abspath = os.path.join(self.__path, module)
            git_dir = '--git-dir=' + os.path.join(module_abspath, '.git')
            work_tree = '--work-tree=' + module_abspath
            if exceptions:
                # TODO: find a better way of dealing with weird characters
                return check_output(['git', git_dir, work_tree] + command, cwd=module_abspath).decode('utf_8').rstrip('\n')
            else:
                try:
                    output = check_output(['git', git_dir, work_tree] + command, cwd=module_abspath).decode('utf_8').rstrip('\n')
                except CalledProcessError as e:
                    print(e.output, end='')

    def config(self, command, module=None, file=None):
        """Configure the repository.

        Runs 'git config', optionally against an explicit config file
        (e.g. .gitmodules) instead of the repository config.
        """
        if file != None:
            command = ['--file=' + file] + command
        return self.git_command(['config'] + command, module)

    def get_gitmodules_config(self, module, option):
        """Get a gitmodules configuration option for a submodule."""
        return self.config(['submodule.' + module + '.' + option],
                           file=self.__dot_gitmodules)

    def set_gitmodules_config(self, module, option, value):
        """Set a gitmodules configuration option for a submodule."""
        return self.config(['submodule.' + module + '.' + option, value],
                           file=self.__dot_gitmodules)

    def is_submodule(self, path):
        """Check if path is a submodule.

        A submodule is recognised by a single index entry in gitlink mode
        160000 at that path.
        """
        output = self.git_command(['ls-files', '--stage', '--', path])
        if(self.__num_lines(output) != 1):
            return False
        if output[0:6] == '160000':
            return True
        else:
            return False

    def assert_is_submodule(self, path):
        """Raise an exception if path is not a valid submodule."""
        if not self.is_submodule(path):
            raise ValueError('Error: ' + path + ' is not a submodule.')

    def upstream_type(self, path):
        """Get version control system used by upstream repository."""
        return self.get_gitmodules_config(path, 'upstreamtype')

    def upstream_url(self, path):
        """Get URL of upstream repository."""
        return self.get_gitmodules_config(path, 'upstreamurl')

    def revision(self, path):
        """Get branch of upstream repository which should be tracked by a submodule."""
        return self.get_gitmodules_config(path, 'revision')

    def set_upstream_type(self, path, type):
        """Set version control system used by upstream repository."""
        self.set_gitmodules_config(path, 'upstreamtype', type)

    def set_upstream_url(self, path, url):
        """Set URL of upstream repository."""
        self.set_gitmodules_config(path, 'upstreamurl', url)

    def set_revision(self, path, revision):
        """Set branch of upstream repository which should be tracked by a submodule."""
        self.set_gitmodules_config(path, 'revision', revision)

    def upstream_init(self, path):
        """Initialise a submodule for pushing patches upstream.

        Behaviour depends on the submodule's configured upstream type:
        svn -> git-svn init+fetch; git -> set the push URL; hg -> clone a
        sibling '<path>.hg' repository wired to the git checkout via hg-git.
        """
        self.assert_is_submodule(path)
        path = path.rstrip('/')
        type = self.upstream_type(path)
        url = self.upstream_url(path)
        print('Initialising submodule ' + type + ' upstream repository for ' + path + '\nwith upstream URL ' + url)
        if type == 'svn':
            rev = self.revision(path)
            self.git_command(['checkout', rev], module=path)
            self.git_command(['svn', 'init', '-s', '--prefix=origin/', url], module=path)
            self.git_command(['svn', 'fetch'], module=path)
        elif type == 'git':
            self.git_command(['remote', 'set-url', '--push', 'origin', url], module=path)
        elif type == 'hg':
            hgpath = path+'.hg'
            call(['hg', 'clone', url, hgpath])
            hgrc = open(os.path.join(hgpath,'.hg/hgrc'), 'a')
            hgrc.write('\n[path]\ngit = '+path+'\n\n[git]\nintree = 1\n')
            hgrc.close()
            call(['hg', '-R', hgpath, 'bookmark', 'master', '-r', 'default'])
            call(['hg', '-R', hgpath, 'gexport'])
            call(['hg', '-R', hgpath, 'pull', 'git'])
        else:
            print('Unknown upstream repository type: ' + type)
            return

    def mv_submodule(self, old, new):
        """Move a submodule.

        Renames the config sections in both .git/config and .gitmodules,
        moves the working directory, and re-stages everything.
        """
        self.assert_is_submodule(old)
        self.config(['--rename-section', 'submodule.'+old, 'submodule.'+new])
        self.config(['--rename-section', 'submodule.'+old, 'submodule.'+new],
                    file=self.__dot_gitmodules)
        self.config(['submodule.'+new+'.path', new], file=self.__dot_gitmodules)
        self.git_command(['rm', '--cached', old])
        os.rename(old, new)
        self.git_command(['add', new])
        self.git_command(['add', self.__dot_gitmodules])

    def rm_submodule(self, old):
        """Remove a submodule.

        Drops its config sections and index entry; does not delete the
        working directory contents.
        """
        self.assert_is_submodule(old)
        self.config(['--remove-section', 'submodule.'+old])
        self.config(['--remove-section', 'submodule.'+old],
                    file=self.__dot_gitmodules)
        self.git_command(['rm', '--cached', old])
        self.git_command(['add', self.__dot_gitmodules])

    def add_submodule(self, path, url, upstreamurl, type, revision):
        """Add a submodule.

        Runs 'git submodule add' and records the extra upstream metadata
        (upstreamurl/upstreamtype/revision) in .gitmodules.
        """
        check_output(['git', 'submodule', 'add', url, path])
        self.set_upstream_url(path, upstreamurl)
        self.set_upstream_type(path, type)
        self.set_revision(path, revision)
        self.git_command(['add', self.__dot_gitmodules])

    def list_submodules(self, gitmodules_file=None):
        """List all submodules.

        Parses the [submodule "<name>"] section headers out of the given
        .gitmodules file (defaults to this repository's).
        """
        if gitmodules_file == None:
            gitmodules_file = self.__dot_gitmodules
        f = open(gitmodules_file)
        pattern = re.compile('\[submodule "(.*)"]')
        modules = []
        for line in f:
            module = pattern.match(line)
            if module != None:
                modules.append(module.group(1))
        f.close()
        return modules

    def list_branches(self, module=None):
        """
        List all local branches. If module is not 'None', list all branches
        in that submodule.

        Detached-HEAD entries ('(no branch)') are filtered out.
        """
        res = self.git_command(['branch', '--no-color'], module).splitlines()
        branches = []
        for branch in res:
            branchname = branch.strip(' *')
            if branchname != '(no branch)':
                branches.append(branch.strip(' *'))
        return branches

    def stage(self, path):
        """
        Stage a file into the index.
        """
        self.git_command(['add', path])

    def remote_status(self, module, branch):
        """
        For the specified branch, lists the commits which are different between
        the local and the tracked upstream version of that branch.

        Returns a dict with 'only-upstream' and 'only-downstream' lists of
        {'SHA1', 'title'} entries, or None when the branch has no remote.
        """
        try:
            remote = self.config(['branch.'+branch+'.remote'], module)
        except:
            # If the branch doesn't have a remote then return an empty list
            return None
        remote_ref = self.config(['branch.'+branch+'.merge'], module)
        remote_branch = re.sub('^refs/heads/', '', remote_ref)
        only_upstream = self.git_command(['rev-list', '--oneline',
            branch + '..' + remote + '/' + remote_branch], module).splitlines()
        only_upstream2 = []
        for commit in only_upstream:
            (sha1, title) = commit.split(' ', 1)
            only_upstream2.append({'SHA1' : sha1, 'title' : title})
        only_downstream = self.git_command(['rev-list', '--oneline',
            remote + '/' + remote_branch + '..' + branch], module).splitlines()
        only_downstream2 = []
        for commit in only_downstream:
            (sha1, title) = commit.split(' ', 1)
            only_downstream2.append({'SHA1' : sha1, 'title' : title})
        return {'only-upstream' : only_upstream2,
                'only-downstream' : only_downstream2}

    def checkout_modules(self, modules):
        """Checkout a list of submodules to the branches they should be tracking."""
        print('Checking out branches in submodules:')
        for module in modules:
            rev = self.revision(module)
            print('  ' + module + ': ' + rev)
            self.git_command(['checkout', '-q', rev], module)

    def pull_ff(self, modules):
        """Do a fast-forward only pull of a list of submodules."""
        print('Updating local branches where possible:')
        for module in modules:
            rev = self.revision(module)
            print('  ' + module + ': ' + rev)
            self.git_command(['pull', '--ff-only', '-q'], module, exceptions=False)

    def fetch_modules(self, modules):
        """Fetch a list of submodules from their remotes."""
        print('Getting updates for submodules:')
        for module in modules:
            print('  ' + module)
            self.git_command(['fetch', '-q'], module, exceptions=False)

    def sync_gitmodules(self):
        """Syncronize the super-repository with the information specified in .gitmodules.

        Diffs the working-tree .gitmodules against the committed one,
        removing submodules that disappeared and adding new ones.
        """
        # Copy new .gitmodules to a temporary file
        tmpfile = tempfile.NamedTemporaryFile(delete=False, suffix='.gitmodules', dir=self.__path)
        new_gitmodules = os.path.join(self.__path, tmpfile.name)
        tmpfile.close()
        os.rename(self.__dot_gitmodules, new_gitmodules)
        self.git_command(['checkout', '--', '.gitmodules'])

        # Find out which submodules are new or old (removed)
        previous_submodules = set(self.list_submodules())
        current_submodules = set(self.list_submodules(gitmodules_file = new_gitmodules))
        common_submodules = current_submodules & previous_submodules
        new_submodules = current_submodules - common_submodules
        old_submodules = previous_submodules - common_submodules

        for module in old_submodules:
            self.rm_submodule(module)

        for module in new_submodules:
            path = self.config(['submodule.' + module + '.path'], file=new_gitmodules)
            url = self.config(['submodule.' + module + '.url'], file=new_gitmodules)
            upstreamurl = self.config(['submodule.' + module + '.upstreamurl'], file=new_gitmodules)
            type = self.config(['submodule.' + module + '.upstreamtype'], file=new_gitmodules)
            revision = self.config(['submodule.' + module + '.revision'], file=new_gitmodules)
            self.add_submodule(path, url, upstreamurl, type, revision)

        os.rename(new_gitmodules, self.__dot_gitmodules)
        self.git_command(['add', self.__dot_gitmodules])

    def stage_submodule(self, module, version):
        # Record a specific submodule commit in the super-repository index
        # without touching the submodule's working tree.
        # See http://serverfault.com/questions/251792/update-git-super-repository-automatically-when-a-submodule-gets-updated
        self.git_command(['update-index', '--cacheinfo', '160000', version, module])

    def current_submodule_commit(self, module):
        # SHA1 of the submodule commit recorded in HEAD's tree.
        return self.git_command(['ls-tree', 'HEAD', module]).split()[2]

    def submodule_commits_since(self, module, since):
        # Oldest-first list of submodule commits after 'since' up to HEAD.
        return self.git_command(['rev-list', "--reverse", since+'..HEAD'],module=module).split()

    def commit(self, message, author=None, date=None):
        # Commit with the message written through a temp file so arbitrary
        # content survives the command line.
        # NOTE(review): os.write requires bytes on Python 3 — this raises
        # TypeError if message is a str there; presumably written for
        # Python 2. Confirm before use on Python 3.
        (f, name) = tempfile.mkstemp()
        os.write(f,message)
        os.close(f)
        args = ['commit', '-F', name]
        if author != None:
            args += ['--author', author]
        if date != None:
            args += ['--date', date]
        self.git_command(args)
        os.remove(name)
|
gpl-2.0
|
jitendra29/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/hybi00.py
|
675
|
11294
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides the opening handshake processor for the WebSocket
protocol version HyBi 00.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import logging
import re
import struct
from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket import util
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_default_port
from mod_pywebsocket.handshake._base import get_mandatory_header
from mod_pywebsocket.handshake._base import parse_host_header
from mod_pywebsocket.handshake._base import validate_mandatory_header
_MANDATORY_HEADERS = [
# key, expected value or None
[common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
[common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
]
def _validate_subprotocol(subprotocol):
"""Checks if characters in subprotocol are in range between U+0020 and
U+007E. A value in the Sec-WebSocket-Protocol field need to satisfy this
requirement.
See the Section 4.1. Opening handshake of the spec.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
# Parameter should be in the range U+0020 to U+007E.
for c in subprotocol:
if not 0x20 <= ord(c) <= 0x7e:
raise HandshakeException(
'Illegal character in subprotocol name: %r' % c)
def _check_header_lines(request, mandatory_headers):
    """Validate the request line and each mandatory header/value pair."""

    check_request_line(request)

    # The expected field names, and the meaning of their corresponding
    # values, are as follows.
    #   |Upgrade| and |Connection|
    for header_name, required_value in mandatory_headers:
        validate_mandatory_header(request, header_name, required_value)
def _build_location(request):
    """Build WebSocket location for request."""

    # Scheme depends on whether the underlying connection is TLS.
    if request.is_https():
        scheme = common.WEB_SOCKET_SECURE_SCHEME
    else:
        scheme = common.WEB_SOCKET_SCHEME

    host, port = parse_host_header(request)
    connection_port = request.connection.local_addr[1]
    if port != connection_port:
        raise HandshakeException('Header/connection port mismatch: %d/%d' %
                                 (port, connection_port))

    location = scheme + '://' + host
    # Only spell out the port when it differs from the scheme's default.
    if port != get_default_port(request.is_https()):
        location = location + ':' + str(port)
    location = location + request.unparsed_uri
    return location
class Handshaker(object):
    """Opening handshake processor for the WebSocket protocol version HyBi 00.
    """

    def __init__(self, request, dispatcher):
        """Construct an instance.

        Args:
            request: mod_python request.
            dispatcher: Dispatcher (dispatch.Dispatcher).

        Handshaker will add attributes such as ws_resource in performing
        handshake.
        """
        self._logger = util.get_class_logger(self)

        self._request = request
        self._dispatcher = dispatcher

    def do_handshake(self):
        """Perform WebSocket Handshake.

        On _request, we set
            ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
            ws_challenge_md5: WebSocket handshake information.
            ws_stream: Frame generation/parsing class.
            ws_version: Protocol version.

        Raises:
            HandshakeException: when any error happened in parsing the opening
                                handshake request.
        """
        # 5.1 Reading the client's opening handshake.
        # dispatcher sets it in self._request.
        _check_header_lines(self._request, _MANDATORY_HEADERS)
        self._set_resource()
        self._set_subprotocol()
        self._set_location()
        self._set_origin()
        self._set_challenge_response()
        self._set_protocol_version()

        self._dispatcher.do_extra_handshake(self._request)

        self._send_handshake()

    def _set_resource(self):
        self._request.ws_resource = self._request.uri

    def _set_subprotocol(self):
        # |Sec-WebSocket-Protocol|
        subprotocol = self._request.headers_in.get(
            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
        if subprotocol is not None:
            _validate_subprotocol(subprotocol)
            self._request.ws_protocol = subprotocol

    def _set_location(self):
        # |Host|
        host = self._request.headers_in.get(common.HOST_HEADER)
        if host is not None:
            self._request.ws_location = _build_location(self._request)
        # TODO(ukai): check host is this host.

    def _set_origin(self):
        # |Origin|
        origin = self._request.headers_in.get(common.ORIGIN_HEADER)
        if origin is not None:
            self._request.ws_origin = origin

    def _set_protocol_version(self):
        # |Sec-WebSocket-Draft|
        draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
        if draft is not None and draft != '0':
            raise HandshakeException('Illegal value for %s: %s' %
                                     (common.SEC_WEBSOCKET_DRAFT_HEADER,
                                      draft))

        self._logger.debug('Protocol version is HyBi 00')
        self._request.ws_version = common.VERSION_HYBI00
        self._request.ws_stream = StreamHixie75(self._request, True)

    def _set_challenge_response(self):
        # 5.2 4-8.
        self._request.ws_challenge = self._get_challenge()
        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
        self._request.ws_challenge_md5 = util.md5_hash(
            self._request.ws_challenge).digest()
        self._logger.debug(
            'Challenge: %r (%s)',
            self._request.ws_challenge,
            util.hexify(self._request.ws_challenge))
        self._logger.debug(
            'Challenge response: %r (%s)',
            self._request.ws_challenge_md5,
            util.hexify(self._request.ws_challenge_md5))

    def _get_key_value(self, key_field):
        """Extract one key part per HyBi-00 section 5.2 steps 4-7.

        Raises:
            HandshakeException: when the key header is malformed.
        """
        key_value = get_mandatory_header(self._request, key_field)

        self._logger.debug('%s: %r', key_field, key_value)

        # 5.2 4. let /key-number_n/ be the digits (characters in the range
        # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
        # interpreted as a base ten integer, ignoring all other characters
        # in /key_n/.
        try:
            key_number = int(re.sub("\\D", "", key_value))
        except ValueError:
            # Narrowed from a bare `except:`; int('') is the only failure
            # mode here and a bare clause would also swallow
            # KeyboardInterrupt/SystemExit.
            raise HandshakeException('%s field contains no digit' % key_field)
        # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
        # in /key_n/.
        spaces = re.subn(" ", "", key_value)[1]
        if spaces == 0:
            raise HandshakeException('%s field contains no space' % key_field)

        self._logger.debug(
            '%s: Key-number is %d and number of spaces is %d',
            key_field, key_number, spaces)

        # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
        # then abort the WebSocket connection.
        if key_number % spaces != 0:
            raise HandshakeException(
                '%s: Key-number (%d) is not an integral multiple of spaces '
                '(%d)' % (key_field, key_number, spaces))
        # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
        # Floor division: identical to Python 2's int `/`, and keeps /part/
        # an integer on Python 3 so struct.pack('!I', part) stays valid.
        part = key_number // spaces
        self._logger.debug('%s: Part is %d', key_field, part)
        return part

    def _get_challenge(self):
        """Assemble the 16-byte challenge: two key parts + 8 body bytes."""
        # 5.2 4-7.
        key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
        key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
        # 5.2 8. let /challenge/ be the concatenation of /part_1/,
        challenge = ''
        challenge += struct.pack('!I', key1)  # network byteorder int
        challenge += struct.pack('!I', key2)  # network byteorder int
        challenge += self._request.connection.read(8)
        return challenge

    def _send_handshake(self):
        response = []

        # 5.2 10. send the following line.
        response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')

        # 5.2 11. send the following fields to the client.
        response.append(format_header(
            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
        response.append(format_header(
            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
        response.append(format_header(
            common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
        response.append(format_header(
            common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
        if self._request.ws_protocol:
            response.append(format_header(
                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
                self._request.ws_protocol))
        # 5.2 12. send two bytes 0x0D 0x0A.
        response.append('\r\n')
        # 5.2 13. send /response/
        response.append(self._request.ws_challenge_md5)

        raw_response = ''.join(response)
        self._request.connection.write(raw_response)
        self._logger.debug('Sent server\'s opening handshake: %r',
                           raw_response)
# vi:sts=4 sw=4 et
|
mpl-2.0
|
jjscarafia/odoo
|
addons/resource/__init__.py
|
448
|
1086
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
charithag/iot-server-appliances
|
IU_workshop/Step 7/python_client/ControlAndPublish/BAMPythonPublisher/Publisher.py
|
15
|
3361
|
#!/usr/bin/env python
import sys, time
# sys.path.append('gen-py')
from ThriftSecureEventTransmissionService import ThriftSecureEventTransmissionService
from ThriftSecureEventTransmissionService.ttypes import *
from thrift import Thrift
from thrift.transport import TSSLSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Define publisher class
class Publisher:
    """Thrift client that publishes events to a secure data receiver over SSL."""

    # Shared Thrift service client; assigned by connect().
    client = None

    def init(self, ip, port):
        """Prepare an SSL socket + buffered transport to ip:port (no connect yet)."""
        # Make SSL socket (certificate validation disabled).
        self.socket = TSSLSocket.TSSLSocket(ip, port, False)
        # Buffering is critical. Raw sockets are very slow
        self.transport = TTransport.TBufferedTransport(self.socket)
        # Wrap in a protocol
        self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)

    def connect(self, username, password):
        """Open the transport and authenticate; stores the session id."""
        # Create a client to use the protocol encoder
        Publisher.client = ThriftSecureEventTransmissionService.Client(self.protocol)

        # Make connection
        self.socket.open()
        self.transport.open()
        self.sessionId = Publisher.client.connect(username, password)

    def defineStream(self, streamDef):
        """Register the stream definition; stores the resulting stream id."""
        self.streamId = Publisher.client.defineStream(self.sessionId, streamDef)

    def publish(self, metaList=None, payloadList=None):
        """Publish a single event.

        metaList: at least 4 entries -- the first three are string meta
            attributes, the fourth is a long meta attribute.
        payloadList: string payload attributes.
        """
        # Defaults created per call: the original mutable defaults ([]) were
        # shared between every invocation of publish().
        if metaList is None:
            metaList = []
        if payloadList is None:
            payloadList = []
        # Build thrift event bundle
        event = EventBundle()
        event.clearAll()
        event.setSessionId(self.sessionId)
        event.setEventNum(1)
        event.addLongAttribute(time.time() * 1000)  # event timestamp in ms
        event.addStringAttribute(self.streamId)
        for meta in metaList[:3]:
            event.addStringAttribute(meta)
        event.addLongAttribute(metaList[3])
        for payload in payloadList:
            event.addStringAttribute(payload)
        # Publish
        Publisher.client.publish(event.getEventBundle())

    def disconnect(self):
        """End the session and close transport and socket."""
        Publisher.client.disconnect(self.sessionId)
        self.transport.close()
        self.socket.close()
class EventBundle:
    """Mutable builder for a Thrift event bundle.

    All attribute lists are initialised per instance in __init__.  The
    original declared them as class-level lists, so the add*Attribute
    methods mutated state shared by every EventBundle until clearAll()
    was called on each instance.
    """

    def __init__(self):
        self.clearAll()

    def clearAll(self):
        """Reset the bundle to an empty state."""
        self.__sessionId = ""
        self.__eventNum = 0
        self.__intAttributeList = []
        self.__longAttributeList = []
        self.__doubleAttributeList = []
        self.__boolAttributeList = []
        self.__stringAttributeList = []
        self.__arbitraryDataMapMap = None

    def setSessionId(self, sessionId):
        self.__sessionId = sessionId

    def setEventNum(self, num):
        self.__eventNum = num

    def addIntAttribute(self, attr):
        self.__intAttributeList.append(attr)

    def addLongAttribute(self, attr):
        self.__longAttributeList.append(attr)

    def addDoubleAttribute(self, attr):
        self.__doubleAttributeList.append(attr)

    def addBoolAttribute(self, attr):
        self.__boolAttributeList.append(attr)

    def addStringAttribute(self, attr):
        self.__stringAttributeList.append(attr)

    def getEventBundle(self):
        # NOTE(review): `Data` is not imported in this module -- presumably
        # the generated thrift `Data.ttypes` package; confirm the import at
        # the call site before relying on this method.
        return Data.ttypes.ThriftEventBundle(self.__sessionId, self.__eventNum,
                                             self.__intAttributeList,
                                             self.__longAttributeList,
                                             self.__doubleAttributeList,
                                             self.__boolAttributeList,
                                             self.__stringAttributeList,
                                             self.__arbitraryDataMapMap)
|
apache-2.0
|
mbauskar/internal-hr
|
erpnext/selling/report/sales_person_wise_transaction_summary/sales_person_wise_transaction_summary.py
|
10
|
2769
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
def execute(filters=None):
    """Report entry point: return (columns, data) for the given filters."""
    active_filters = filters or {}
    return get_columns(active_filters), get_entries(active_filters)
def get_columns(filters):
    """Return the report column definitions; aborts when no doc_type is set."""
    doc_type = filters.get("doc_type")
    if not doc_type:
        msgprint(_("Please select the document type first"), raise_exception=1)

    return [
        "%s:Link/%s:140" % (doc_type, doc_type),
        "Customer:Link/Customer:140",
        "Territory:Link/Territory:100",
        "Posting Date:Date:100",
        "Item Code:Link/Item:120",
        "Qty:Float:100",
        "Amount:Currency:120",
        "Sales Person:Link/Sales Person:140",
        "Contribution %:Float:110",
        "Contribution Amount:Currency:140",
    ]
def get_entries(filters):
	"""Fetch one row per (document, sales person) pair, with the sales
	person's allocated contribution computed in SQL.

	Joins the selected doctype, its Item child table and the Sales Team
	child table; only submitted documents (docstatus = 1) are included.
	"""
	# Sales Orders are dated by transaction_date; other doctypes
	# (invoices/delivery notes) by posting_date.
	date_field = filters["doc_type"] == "Sales Order" and "transaction_date" or "posting_date"
	conditions, items = get_conditions(filters, date_field)
	# The doctype/table names and `conditions` are %-interpolated (they
	# cannot be bound parameters); the parenttype value and the item list
	# are passed as bound parameters via the trailing tuple.
	entries = frappe.db.sql("""select dt.name, dt.customer, dt.territory, dt.%s,
		dt_item.item_code, dt_item.qty, dt_item.base_amount, st.sales_person,
		st.allocated_percentage, dt_item.base_amount*st.allocated_percentage/100
		from `tab%s` dt, `tab%s Item` dt_item, `tabSales Team` st
		where st.parent = dt.name and dt.name = dt_item.parent and st.parenttype = %s
		and dt.docstatus = 1 %s order by st.sales_person, dt.name desc""" %
		(date_field, filters["doc_type"], filters["doc_type"], '%s', conditions),
		tuple([filters["doc_type"]] + items), as_list=1)

	return entries
def get_conditions(filters, date_field):
	"""Build the extra WHERE-clause fragment and the bound item list.

	Fix: the original escaped quotes with `.replace("'", "\'")`, which is
	a no-op ("\'" and "'" are the same one-character string), so filter
	values containing a quote were interpolated unescaped into the SQL.
	Backslash-escape the quote for MySQL instead.  A proper fix would bind
	these values as query parameters, but that requires changing the
	caller's interpolation contract.
	"""
	conditions = ""
	if filters.get("company"): conditions += " and dt.company = '%s'" % \
		filters["company"].replace("'", "\\'")
	if filters.get("customer"): conditions += " and dt.customer = '%s'" % \
		filters["customer"].replace("'", "\\'")
	if filters.get("territory"): conditions += " and dt.territory = '%s'" % \
		filters["territory"].replace("'", "\\'")

	if filters.get("from_date"): conditions += " and dt.%s >= '%s'" % \
		(date_field, filters["from_date"])
	if filters.get("to_date"): conditions += " and dt.%s <= '%s'" % (date_field, filters["to_date"])

	if filters.get("sales_person"): conditions += " and st.sales_person = '%s'" % \
		filters["sales_person"].replace("'", "\\'")

	items = get_items(filters)
	if items:
		# One %s placeholder per item; values are bound by the caller.
		conditions += " and dt_item.item_code in (%s)" % ', '.join(['%s']*len(items))

	return conditions, items
def get_items(filters):
	"""Return item codes matching the selected item_group or brand filter."""
	if filters.get("item_group"):
		filter_key = "item_group"
	elif filters.get("brand"):
		filter_key = "brand"
	else:
		return []

	return frappe.db.sql_list("""select name from tabItem where %s = %s""" %
		(filter_key, '%s'), (filters[filter_key]))
|
agpl-3.0
|
rnowling/pop-gen-models
|
single-pop/calculate_phist.py
|
2
|
3690
|
import sys
import numpy as np
import matplotlib.pyplot as plt
def ssdwpfunc(individuals, frequencies):
	"""
	Returns the sums squares deviation within populations from the
	population frequencies.

	individuals[pop] = diploid individual counts per population
	frequencies[pop, haplotype] = haplotype frequency

	Fix: uses range() -- the original xrange() exists only on Python 2.
	Also drops an unused local (n_haplotypes).
	"""
	ssdwp = 0.0
	n_pop = frequencies.shape[0]
	for pop_idx in range(n_pop):
		gene_copies = individuals[pop_idx] * 2  # diploid: 2 gene copies each
		pop_freq = frequencies[pop_idx, :]
		# ((sum f)^2 - sum f^2) / 2: sum of products over unordered
		# haplotype pairs, i.e. the per-population SSD term.
		pop_ssd = (np.outer(pop_freq, pop_freq).sum() - np.inner(pop_freq, pop_freq)) / 2.0
		ssdwp += gene_copies * pop_ssd
	return ssdwp
def ssdtotalfunc(individuals, frequencies):
	"""Return the total sum of squared deviations for one locus.

	individuals[pop] = diploid individual counts
	frequencies[pop, haplotype] = haplotype frequency
	"""
	n_indiv = individuals.sum()
	# Total gene copies at this locus across all populations (diploid).
	gene_copies = 2.0 * n_indiv
	# Pool per-population frequencies, weighted by population size.
	pooled = np.sum(frequencies * individuals[:, np.newaxis], axis=0) / n_indiv
	pair_sum = np.outer(pooled, pooled).sum() - np.inner(pooled, pooled)
	return gene_copies * (pair_sum / 2.0)
def onecalcphi(individuals, frequencies):
	"""
	Compute Phi_ST for a single locus (AMOVA-style variance partition).

	individuals[pop] = individuals
	frequencies[pop][haplotype] = individuals
	"""
	n_gene_copies = individuals.sum() * 2 # diploid
	n_pop = individuals.shape[0]
	# calculate the sums squared deviation within populations
	ssdwp = ssdwpfunc(individuals, frequencies)
	# sums squared deviations total at the locus
	ssdtotal = ssdtotalfunc(individuals, frequencies)
	# degrees of freedom for between populations
	dfb = n_pop - 1
	# degrees of freedom for total locus
	dfw = n_gene_copies - n_pop
	# Guard: with no within-population degrees of freedom Phi_ST is
	# undefined; report 0 rather than divide by zero.
	if dfw == 0:
		return 0.0
	# mean squared deviation within populations
	msdwp = ssdwp / dfw
	# mean squared deviation among populations
	msdap = (ssdtotal - ssdwp)/dfb
	# Calculate the variation among populations
	varAP = (msdap - msdwp)/(float(n_gene_copies)/n_pop)
	# Guard: zero total variance also leaves Phi_ST undefined; report 0.
	if (varAP + msdwp) == 0.0:
		return 0.0
	# PHIst is the proportion of the variation partitioned among populations
	phi_st = varAP/(msdwp + varAP)
	assert not(np.isnan(phi_st))
	return phi_st
def calc_locus_phi_st(individuals, frequencies):
	"""Compute Phi_ST for every locus.

	individuals[locus, pop] = individual counts
	frequencies[locus, pop, haplotype] = haplotype frequencies

	Fix: uses range() -- the original xrange() exists only on Python 2.
	"""
	n_loci = individuals.shape[0]
	phi_st = np.zeros(n_loci)
	for locus_i in range(n_loci):
		phi_st[locus_i] = onecalcphi(individuals[locus_i, :], frequencies[locus_i, :, :])
	return phi_st
def read_counts(flname):
	"""Parse a haplotype-count file into an array [locus, pop, haplotype].

	Lines containing "Marker" start a new locus; every other line holds
	one population's counts in columns 2 onward.

	Fixes: map() is wrapped in list() so the nested structure stays
	numeric under Python 3 (where map() returns an iterator and
	np.array() would otherwise build an object array), and the file is
	opened with a context manager so it is closed even on a parse error.
	"""
	vector = []
	populations = []
	with open(flname) as fl:
		for ln in fl:
			if "Marker" in ln:
				# Flush the previous locus (skip for the very first marker).
				if len(populations) != 0:
					vector.append(populations)
					populations = []
				continue
			cols = ln.split()
			pop_locus_counts = list(map(float, cols[2:]))
			populations.append(pop_locus_counts)
	vector.append(populations)
	return np.array(vector)
def normalize_haplotypes(counts):
	"""Convert counts[locus, pop, haplotype] into per-population frequencies."""
	# Haplotype totals for each (locus, population) pair, broadcast back
	# across the haplotype axis for the division.
	totals = counts.sum(axis=2)
	return counts / totals[:, :, None]
def write_phi(flname, phi_values):
	"""Write one 'locus_index,phi' line per value to flname.

	Fix: uses a context manager so the file is flushed and closed even if
	a write raises (the original open/close pair leaked the handle on
	error).
	"""
	with open(flname, "w") as fl:
		for i, phi in enumerate(phi_values):
			fl.write("%s,%s\n" % (i, phi))
if __name__ == "__main__":
	# Usage: calculate_phist.py <occurrence_counts_file> <output_file>
	occur_fl = sys.argv[1]
	out_fl = sys.argv[2]
	counts = read_counts(occur_fl)
	frequencies = normalize_haplotypes(counts)
	# Per-(locus, population) totals of the haplotype counts; fed to
	# calc_locus_phi_st as the individual counts.
	individuals = counts.sum(axis=2)
	phi_sts = calc_locus_phi_st(individuals, frequencies)
	write_phi(out_fl, phi_sts)
|
apache-2.0
|
PaddlePaddle/Paddle
|
tools/diff_use_default_grad_op_maker.py
|
2
|
1758
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Hide all CUDA devices before paddle is imported so this tool runs on CPU.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import paddle.fluid as fluid
import sys
def generate_spec(filename):
    """Write the names of ops using the default grad-op-desc maker, one per line."""
    op_names = fluid.core._get_use_default_grad_op_desc_maker_ops()
    with open(filename, 'w') as spec_file:
        spec_file.writelines(name + '\n' for name in op_names)
def read_spec(filename):
    """Load a spec file as a set of whitespace-stripped op names."""
    with open(filename, 'r') as spec_file:
        return {line.strip() for line in spec_file}
def get_spec_diff(dev_filename, pr_filename):
    """Return ops present in the PR spec but absent from the dev spec.

    Fix: the original also built a `removed_ops` list, but its condition
    was inverted -- it collected ops present in *both* specs -- and the
    list was never returned, so that dead, misleading code is dropped.
    """
    ops_dev = read_spec(dev_filename)
    ops_pr = read_spec(pr_filename)
    added_ops = [op for op in ops_pr if op not in ops_dev]
    return added_ops
# CLI entry: one path argument generates a spec file for the current build;
# two path arguments diff a dev spec against a PR spec.
if len(sys.argv) == 2:
    generate_spec(sys.argv[1])
elif len(sys.argv) == 3:
    added_ops = get_spec_diff(sys.argv[1], sys.argv[2])
    if added_ops:
        # Ops newly relying on the default grad op maker in the PR.
        print(added_ops)
else:
    print(
        'Usage 1: python diff_use_default_grad_op_maker.py [filepath] to generate new spec file\n'
        'Usage 2: python diff_use_default_grad_op_maker.py [dev_filepath] [pr_filepath] to diff spec file'
    )
    sys.exit(1)
|
apache-2.0
|
GodBlessPP/W17test_2nd_1
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/__init__.py
|
637
|
3505
|
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=None):
    """Parse `source` with a freshly created default parser.

    Fix: `errorHandler` now defaults to None and a new ErrorHandler is
    created per call; the original default (`ErrorHandler()` evaluated
    once at def time) silently shared a single handler instance across
    every call to parse().
    """
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    parser.parse(source)
def parseString(string, handler, errorHandler=None):
    """Parse an in-memory XML document (bytes) with a default parser.

    Fix: the body already guarded against errorHandler being None, yet
    the signature still instantiated a shared `ErrorHandler()` default at
    def time; defaulting to None makes the guard the single code path.
    """
    from io import BytesIO

    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)

    inpsrc = InputSource()
    inpsrc.setByteStream(BytesIO(string))
    parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
# (the branch is never executed at runtime; _false is always 0).
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
#if "PY_SAX_PARSER" in os.environ:
#    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os

# On Jython, the default parser list may be overridden through the JVM
# registry key below.
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list=None):
    """Creates and returns a SAX parser.

    Creates the first parser it is able to instantiate of the ones
    given in the list created by doing parser_list +
    default_parser_list.  The lists must contain the names of Python
    modules containing both a SAX parser and a create_parser function.

    Fix: `parser_list` defaults to None instead of a shared mutable `[]`
    (the list is only read here, but a mutable default invites
    accidental cross-call state).
    """
    if parser_list is None:
        parser_list = []
    for parser_name in parser_list + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError as e:
            import sys
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass

    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser

if sys.platform[ : 4] == "java":
    # Jython: resolve the driver module through the JVM import machinery.
    def _create_parser(parser_name):
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    def _create_parser(parser_name):
        # The fromlist argument forces __import__ to return the leaf
        # module itself rather than the top-level package.
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

# sys was only needed for the platform check above.
del sys
|
gpl-3.0
|
czgu/opendataexperience
|
env/lib/python2.7/site-packages/django/contrib/contenttypes/fields.py
|
43
|
22826
|
from __future__ import unicode_literals
from collections import defaultdict
from django.core import checks
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db import models, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import signals, FieldDoesNotExist, DO_NOTHING
from django.db.models.base import ModelBase
from django.db.models.fields.related import ForeignObject, ForeignObjectRel
from django.db.models.related import PathInfo
from django.db.models.sql.datastructures import Col
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from django.utils.deprecation import RenameMethodsBase, RemovedInDjango18Warning
from django.utils.encoding import smart_text, python_2_unicode_compatible
class RenameGenericForeignKeyMethods(RenameMethodsBase):
    # Metaclass hook: keeps the old method name working as an alias for the
    # new one, emitting RemovedInDjango18Warning when the old name is used.
    renamed_methods = (
        ('get_prefetch_query_set', 'get_prefetch_queryset', RemovedInDjango18Warning),
    )
@python_2_unicode_compatible
class GenericForeignKey(six.with_metaclass(RenameGenericForeignKeyMethods)):
    """
    Provides a generic relation to any object through content-type/object-id
    fields.

    Acts both as a virtual model field (registered via contribute_to_class)
    and as a descriptor (__get__/__set__) that resolves and caches the
    related object from the two concrete columns.
    """

    def __init__(self, ct_field="content_type", fk_field="object_id", for_concrete_model=True):
        # Names of the two concrete fields this virtual field reads/writes.
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False

    def contribute_to_class(self, cls, name):
        self.name = name
        self.model = cls
        # Per-instance attribute name used to memoize the resolved object.
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_virtual_field(self)

        # Only run pre-initialization field assignment on non-abstract models
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)

        # Installing self on the class makes it act as a descriptor.
        setattr(cls, name, self)

    def __str__(self):
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)

    def check(self, **kwargs):
        # System-check entry point; aggregates the three field validations.
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors

    def _check_field_name(self):
        # Trailing underscores clash with Django's query-lookup syntax.
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []

    def _check_object_id_field(self):
        # The fk_field must exist as a concrete field on the model.
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []

    def _check_content_type_field(self):
        """ Check if field named `field_name` in model `model` exists and is
        valid content_type field (is a ForeignKey to ContentType). """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.rel.to != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handles initializing an object with the generic FK instead of
        content-type/object-id fields.
        """
        # Rewrites Model(generic_fk=obj) into the two underlying kwargs.
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None

    def get_content_type(self, obj=None, id=None, using=None):
        # Resolve a ContentType either from a model instance (routing the
        # query to the instance's db) or from a raw content-type id.
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def get_prefetch_queryset(self, instances, queryset=None):
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")

        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance

        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))

        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)

        # Return shape expected by the prefetch machinery:
        # (queryset results, rel_obj_attr, instance_attr, single, cache_name).
        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)

    def is_cached(self, instance):
        return hasattr(instance, self.cache_attr)

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self

        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # Dangling reference: cache None rather than raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        # Keep the two concrete columns and the cache in sync with `value`.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()

        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRelation(ForeignObject):
    """Provides an accessor to generic related objects (e.g. comments)

    The reverse side of a GenericForeignKey: declared on the *target*
    model, it exposes a related manager over all objects whose
    content-type/object-id pair points at the instance.
    """

    def __init__(self, to, **kwargs):
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = GenericRel(
            self, to,
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
        )

        # Override content-type/object-id field names on the related class
        self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
        self.content_type_field_name = kwargs.pop("content_type_field", "content_type")

        self.for_concrete_model = kwargs.pop("for_concrete_model", True)

        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False

        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, to_fields=[],
            from_fields=[self.object_id_field_name], **kwargs)

    def check(self, **kwargs):
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors

    def _check_generic_foreign_key_existence(self):
        # The target model must declare a GenericForeignKey over the same
        # (content_type, object_id) field pair for the relation to resolve.
        target = self.rel.to
        if isinstance(target, ModelBase):
            # Using `vars` is very ugly approach, but there is no better one,
            # because GenericForeignKeys are not considered as fields and,
            # therefore, are not included in `target._meta.local_fields`.
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            return []

    def resolve_related_fields(self):
        # Join pair: target's object_id column against this model's pk.
        self.to_fields = [self.model._meta.pk.name]
        return [(self.rel.to._meta.get_field_by_name(self.object_id_field_name)[0],
                 self.model._meta.pk)]

    def get_path_info(self):
        opts = self.rel.to._meta
        target = opts.get_field_by_name(self.object_id_field_name)[0]
        return [PathInfo(self.model._meta, opts, (target,), self.rel, True, False)]

    def get_reverse_path_info(self):
        opts = self.model._meta
        from_opts = self.rel.to._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]

    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)

    def value_to_string(self, obj):
        # Serialized form: the list of related objects' primary keys.
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])

    def contribute_to_class(self, cls, name):
        super(GenericRelation, self).contribute_to_class(cls, name, virtual_only=True)

        # Save a reference to which model this class is on for future use
        self.model = cls

        # Add the descriptor for the relation
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self, self.for_concrete_model))

    def set_attributes_from_rel(self):
        # No-op: this virtual field takes nothing from the remote field.
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def get_content_type(self):
        """
        Returns the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)

    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Extra JOIN condition: the remote row's content_type must equal
        # this model's content type (the object_id match alone is ambiguous).
        field = self.rel.to._meta.get_field_by_name(self.content_type_field_name)[0]
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(Col(remote_alias, field, field), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.rel.to._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(object):
    """
    This class provides the functionality that makes the related-object
    managers available as attributes on a model class, for fields that have
    multiple "remote" values and have a GenericRelation defined in their model
    (rather than having another model pointed *at* them). In the example
    "article.publications", the publications attribute is a
    ReverseGenericRelatedObjectsDescriptor instance.
    """

    def __init__(self, field, for_concrete_model=True):
        self.field = field
        self.for_concrete_model = for_concrete_model

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self

        # Dynamically create a class that subclasses the related model's
        # default manager.
        rel_model = self.field.rel.to
        superclass = rel_model._default_manager.__class__
        RelatedManager = create_generic_related_manager(superclass)

        qn = connection.ops.quote_name
        content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
            instance, for_concrete_model=self.for_concrete_model)

        # Quoted (source, target) column names of the reverse join.
        join_cols = self.field.get_joining_columns(reverse_join=True)[0]
        manager = RelatedManager(
            model=rel_model,
            instance=instance,
            source_col_name=qn(join_cols[0]),
            target_col_name=qn(join_cols[1]),
            content_type=content_type,
            content_type_field_name=self.field.content_type_field_name,
            object_id_field_name=self.field.object_id_field_name,
            prefetch_cache_name=self.field.attname,
        )

        return manager

    def __set__(self, instance, value):
        # Assignment replaces the related set: clear, then add each object.
        manager = self.__get__(instance)
        manager.clear()
        for obj in value:
            manager.add(obj)
def create_generic_related_manager(superclass):
    """
    Factory function for a manager that subclasses 'superclass' (which is a
    Manager) and adds behavior for generic related objects.

    The returned manager class filters every queryset by the owning
    instance's content type and primary key, and stamps both onto objects
    that are added or created through it.
    """
    class GenericRelatedObjectManager(superclass):
        def __init__(self, model=None, instance=None, symmetrical=None,
                     source_col_name=None, target_col_name=None, content_type=None,
                     content_type_field_name=None, object_id_field_name=None,
                     prefetch_cache_name=None):
            super(GenericRelatedObjectManager, self).__init__()
            self.model = model
            self.content_type = content_type
            self.symmetrical = symmetrical
            self.instance = instance
            self.source_col_name = source_col_name
            self.target_col_name = target_col_name
            self.content_type_field_name = content_type_field_name
            self.object_id_field_name = object_id_field_name
            self.prefetch_cache_name = prefetch_cache_name
            self.pk_val = self.instance._get_pk_val()
            # Filters that restrict every queryset to rows pointing back at
            # the owning instance (by content type id and object id).
            self.core_filters = {
                '%s__pk' % content_type_field_name: content_type.id,
                '%s' % object_id_field_name: instance._get_pk_val(),
            }

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__)
            return manager_class(
                model=self.model,
                instance=self.instance,
                symmetrical=self.symmetrical,
                source_col_name=self.source_col_name,
                target_col_name=self.target_col_name,
                content_type=self.content_type,
                content_type_field_name=self.content_type_field_name,
                object_id_field_name=self.object_id_field_name,
                prefetch_cache_name=self.prefetch_cache_name,
            )
        do_not_call_in_templates = True

        def __str__(self):
            return repr(self)

        def get_queryset(self):
            # Serve prefetched results when available; otherwise query the
            # database the instance came from (falling back to the read
            # router) restricted by core_filters.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)

        def get_prefetch_queryset(self, instances, queryset=None):
            """Return the 5-tuple consumed by prefetch_related():
            (queryset, rel_obj_attr, instance_attr, single, cache_name)."""
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }

            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)

        def add(self, *objs):
            # Point each object at the owner and persist it immediately.
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                setattr(obj, self.content_type_field_name, self.content_type)
                setattr(obj, self.object_id_field_name, self.pk_val)
                obj.save()
        add.alters_data = True

        def remove(self, *objs, **kwargs):
            # Removing deletes the related rows (delegated to _clear via a
            # pk-filtered queryset).
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True

        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True

        def _clear(self, queryset, bulk):
            # bulk=True issues a single queryset.delete(); bulk=False deletes
            # row by row inside a transaction so per-object delete() logic
            # still runs.
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                queryset.delete()
            else:
                with transaction.commit_on_success_unless_managed(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True

        def create(self, **kwargs):
            # Force the generic FK fields to point at the owner before
            # delegating to the superclass manager.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True

    return GenericRelatedObjectManager
class GenericRel(ForeignObjectRel):
    """Relation metadata object used for generic relations.

    Note: the ``related_name`` parameter is accepted for signature
    compatibility but ignored — the super() call passes
    ``related_query_name or '+'`` as the related_name instead, and the
    relation always uses DO_NOTHING on delete.
    """
    def __init__(self, field, to, related_name=None, limit_choices_to=None, related_query_name=None):
        super(GenericRel, self).__init__(field=field, to=to, related_name=related_query_name or '+',
                                         limit_choices_to=limit_choices_to, on_delete=DO_NOTHING,
                                         related_query_name=related_query_name)
|
apache-2.0
|
jonathankablan/prestashop-1.6
|
prestashop_1.7/vendor/doctrine/orm/docs/en/conf.py
|
2448
|
6497
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: return a source URL for *domain*/*info*.

    Only the 'dcorm' domain is linked (with a placeholder URL); every
    other domain yields None, which suppresses the link.
    """
    return 'http://' if domain == 'dcorm' else None
|
gpl-3.0
|
shurihell/testasia
|
common/test/acceptance/tests/studio/test_studio_settings.py
|
32
|
19688
|
# coding: utf-8
"""
Acceptance tests for Studio's Setting pages
"""
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from base_studio_test import StudioCourseTest
from bok_choy.promise import EmptyPromise
from ...fixtures.course import XBlockFixtureDesc
from ..helpers import create_user_partition_json
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.settings import SettingsPage
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.lms.courseware import CoursewarePage
from textwrap import dedent
from xmodule.partitions.partitions import Group
class ContentGroupConfigurationTest(StudioCourseTest):
    """
    Tests for content groups in the Group Configurations Page.
    There are tests for the experiment groups in test_studio_split_test.
    """
    def setUp(self):
        super(ContentGroupConfigurationTest, self).setUp()
        # Page objects under test: the Group Configurations page and the
        # course outline (used to verify the usage link).
        self.group_configurations_page = GroupConfigurationsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

    def populate_course_fixture(self, course_fixture):
        """
        Populates test course with chapter, sequential, and 1 problems.
        The problem is visible only to Group "alpha".
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            )
        )

    def create_and_verify_content_group(self, name, existing_groups):
        """
        Creates a new content group named *name* and verifies that it was
        properly created. *existing_groups* is the number of groups expected
        to already be present (0 triggers the "first group" flow).

        Returns the created group's page object.
        """
        self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups))
        if existing_groups == 0:
            self.group_configurations_page.create_first_content_group()
        else:
            self.group_configurations_page.add_content_group()
        config = self.group_configurations_page.content_groups[existing_groups]
        config.name = name
        # Save the content group
        self.assertEqual(config.get_text('.action-primary'), "Create")
        # A group being created for the first time has no delete button yet.
        self.assertFalse(config.delete_button_is_present)
        config.save()
        self.assertIn(name, config.name)
        return config

    def test_no_content_groups_by_default(self):
        """
        Scenario: Ensure that message telling me to create a new content group is
        shown when no content groups exist.
        Given I have a course without content groups
        When I go to the Group Configuration page in Studio
        Then I see "You have not created any content groups yet." message
        """
        self.group_configurations_page.visit()
        self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present)
        self.assertIn(
            "You have not created any content groups yet.",
            self.group_configurations_page.no_content_groups_message_text
        )

    def test_can_create_and_edit_content_groups(self):
        """
        Scenario: Ensure that the content groups can be created and edited correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Create'
        Then I see the new content is added and has correct data
        And I click 'New Content Group' button
        And I set the name and click the button 'Create'
        Then I see the second content group is added and has correct data
        When I edit the second content group
        And I change the name and click the button 'Save'
        Then I see the second content group is saved successfully and has the new name
        """
        self.group_configurations_page.visit()
        self.create_and_verify_content_group("New Content Group", 0)
        second_config = self.create_and_verify_content_group("Second Content Group", 1)

        # Edit the second content group
        second_config.edit()
        second_config.name = "Updated Second Content Group"
        self.assertEqual(second_config.get_text('.action-primary'), "Save")
        second_config.save()

        self.assertIn("Updated Second Content Group", second_config.name)

    def test_cannot_delete_used_content_group(self):
        """
        Scenario: Ensure that the user cannot delete used content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I try to delete the Content Group with name "New Content Group"
        Then I see the delete button is disabled.
        """
        # Install a cohort-scheme user partition directly on the course so
        # the group can be referenced by a unit.
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        0,
                        'Configuration alpha,',
                        'Content Group Partition',
                        [Group("0", 'alpha')],
                        scheme="cohort"
                    )
                ],
            },
        })
        problem_data = dedent("""
            <problem markdown="Simple Problem" max_attempts="" weight="">
              <p>Choose Yes.</p>
              <choiceresponse>
                <checkboxgroup>
                  <choice correct="true">Yes</choice>
                </checkboxgroup>
              </choiceresponse>
            </problem>
        """)
        vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # group_access ties the problem to partition 0 / group 0, which makes
        # the content group "used" and therefore undeletable.
        self.course_fixture.create_xblock(
            vertical.locator,
            XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}),
        )
        self.group_configurations_page.visit()
        config = self.group_configurations_page.content_groups[0]
        self.assertTrue(config.delete_button_is_disabled)

    def test_can_delete_unused_content_group(self):
        """
        Scenario: Ensure that the user can delete unused content group.
        Given I have a course with 1 Content Group
        And I go to the Group Configuration page
        When I delete the Content Group with name "New Content Group"
        Then I see that there is no Content Group
        When I refresh the page
        Then I see that the content group has been deleted
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        self.assertTrue(config.delete_button_is_present)

        self.assertEqual(len(self.group_configurations_page.content_groups), 1)

        # Delete content group
        config.delete()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)

        self.group_configurations_page.visit()
        self.assertEqual(len(self.group_configurations_page.content_groups), 0)

    def test_must_supply_name(self):
        """
        Scenario: Ensure that validation of the content group works correctly.
        Given I have a course without content groups
        And I create new content group without specifying a name click the button 'Create'
        Then I see error message "Group name is required"
        When I set a name and click the button 'Create'
        Then I see the content group is saved successfully
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.save()
        # Saving without a name keeps the group in edit mode and surfaces
        # the validation message.
        self.assertEqual(config.mode, 'edit')
        self.assertEqual("Group name is required", config.validation_message)
        config.name = "Content Group Name"
        config.save()
        self.assertIn("Content Group Name", config.name)

    def test_can_cancel_creation_of_content_group(self):
        """
        Scenario: Ensure that creation of a content group can be canceled correctly.
        Given I have a course without content groups
        When I click button 'Add your first Content Group'
        And I set new the name and click the button 'Cancel'
        Then I see that there is no content groups in the course
        """
        self.group_configurations_page.visit()
        self.group_configurations_page.create_first_content_group()
        config = self.group_configurations_page.content_groups[0]
        config.name = "Content Group"
        config.cancel()
        self.assertEqual(0, len(self.group_configurations_page.content_groups))

    def test_content_group_empty_usage(self):
        """
        Scenario: When content group is not used, ensure that the link to outline page works correctly.
        Given I have a course without content group
        And I create new content group
        Then I see a link to the outline page
        When I click on the outline link
        Then I see the outline page
        """
        self.group_configurations_page.visit()
        config = self.create_and_verify_content_group("New Content Group", 0)
        config.toggle()
        config.click_outline_anchor()

        # Waiting for the page load and verify that we've landed on course outline page
        EmptyPromise(
            lambda: self.outline_page.is_browser_on_page(), "loaded page {!r}".format(self.outline_page),
            timeout=30
        ).fulfill()
class AdvancedSettingsValidationTest(StudioCourseTest):
    """
    Tests for validation feature in Studio's advanced settings tab.

    Each test feeds deliberately wrong values into the settings fields and
    checks how the validation modal behaves (error listing, undo, manual
    changes).
    """
    def setUp(self):
        super(AdvancedSettingsValidationTest, self).setUp()
        self.advanced_settings = AdvancedSettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # One representative field per value type (String, List, Dict,
        # Integer, Date) — see get_settings_fields_of_each_type().
        self.type_fields = ['Course Display Name', 'Advanced Module List', 'Discussion Topic Mapping',
                            'Maximum Attempts', 'Course Announcement Date']

        # Before every test, make sure to visit the page first
        self.advanced_settings.visit()
        self.assertTrue(self.advanced_settings.is_browser_on_page())

    def test_modal_shows_one_validation_error(self):
        """
        Test that advanced settings don't save if there's a single wrong input,
        and that it shows the correct error message in the modal.
        """
        # Feed an integer value for String field.
        # .set method saves automatically after setting a value
        course_display_name = self.advanced_settings.get('Course Display Name')
        self.advanced_settings.set('Course Display Name', 1)
        self.advanced_settings.wait_for_modal_load()

        # Test Modal
        self.check_modal_shows_correct_contents(['Course Display Name'])
        self.advanced_settings.refresh_and_wait_for_load()

        self.assertEquals(
            self.advanced_settings.get('Course Display Name'),
            course_display_name,
            'Wrong input for Course Display Name must not change its value'
        )

    def test_modal_shows_multiple_validation_errors(self):
        """
        Test that advanced settings don't save with multiple wrong inputs
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()

        # Test Modal
        self.check_modal_shows_correct_contents(self.type_fields)
        self.advanced_settings.refresh_and_wait_for_load()

        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Wrong input for Advanced Settings Fields must not change its value'
            )

    def test_undo_changes(self):
        """
        Test that undo changes button in the modal resets all settings changes
        """
        # Save original values and feed wrong inputs
        original_values_map = self.get_settings_fields_of_each_type()
        self.set_wrong_inputs_to_fields()

        # Let modal popup
        self.advanced_settings.wait_for_modal_load()

        # Click Undo Changes button
        self.advanced_settings.undo_changes_via_modal()

        # Check that changes are undone
        for key, val in original_values_map.iteritems():
            self.assertEquals(
                self.advanced_settings.get(key),
                val,
                'Undoing Should revert back to original value'
            )

    def test_manual_change(self):
        """
        Test that manual changes button in the modal keeps settings unchanged
        """
        # NOTE(review): this dict mirrors the values hard-coded in
        # set_wrong_inputs_to_fields(); keep the two in sync.
        inputs = {"Course Display Name": 1,
                  "Advanced Module List": 1,
                  "Discussion Topic Mapping": 1,
                  "Maximum Attempts": '"string"',
                  "Course Announcement Date": '"string"',
                  }
        self.set_wrong_inputs_to_fields()
        self.advanced_settings.wait_for_modal_load()
        self.advanced_settings.trigger_manual_changes()

        # Check that the validation modal went away.
        self.assertFalse(self.advanced_settings.is_validation_modal_present())

        # Iterate through the wrong values and make sure they're still displayed
        for key, val in inputs.iteritems():
            self.assertEquals(
                str(self.advanced_settings.get(key)),
                str(val),
                'manual change should keep: ' + str(val) + ', but is: ' + str(self.advanced_settings.get(key))
            )

    def check_modal_shows_correct_contents(self, wrong_settings_list):
        """
        Helper function that checks if the validation modal contains correct
        error messages: the listed setting names must match
        *wrong_settings_list* exactly, and each must carry a message.
        """
        # Check presence of modal
        self.assertTrue(self.advanced_settings.is_validation_modal_present())

        # List of wrong settings item & what is presented in the modal should be the same
        error_item_names = self.advanced_settings.get_error_item_names()
        self.assertEqual(set(wrong_settings_list), set(error_item_names))

        error_item_messages = self.advanced_settings.get_error_item_messages()
        self.assertEqual(len(error_item_names), len(error_item_messages))

    def get_settings_fields_of_each_type(self):
        """
        Get one of each field type:
           - String: Course Display Name
           - List: Advanced Module List
           - Dict: Discussion Topic Mapping
           - Integer: Maximum Attempts
           - Date: Course Announcement Date
        """
        return {
            "Course Display Name": self.advanced_settings.get('Course Display Name'),
            "Advanced Module List": self.advanced_settings.get('Advanced Module List'),
            "Discussion Topic Mapping": self.advanced_settings.get('Discussion Topic Mapping'),
            "Maximum Attempts": self.advanced_settings.get('Maximum Attempts'),
            "Course Announcement Date": self.advanced_settings.get('Course Announcement Date'),
        }

    def set_wrong_inputs_to_fields(self):
        """
        Set wrong values for the chosen fields (type-mismatched on purpose).
        """
        self.advanced_settings.set_values(
            {
                "Course Display Name": 1,
                "Advanced Module List": 1,
                "Discussion Topic Mapping": 1,
                "Maximum Attempts": '"string"',
                "Course Announcement Date": '"string"',
            }
        )

    def test_only_expected_fields_are_displayed(self):
        """
        Scenario: The Advanced Settings screen displays settings/fields not specifically hidden from
        view by a developer.
        Given I have a set of CourseMetadata fields defined for the course
        When I view the Advanced Settings screen for the course
        The total number of fields displayed matches the number I expect
        And the actual fields displayed match the fields I expect to see
        """
        expected_fields = self.advanced_settings.expected_settings_names
        displayed_fields = self.advanced_settings.displayed_settings_names
        self.assertEquals(set(displayed_fields), set(expected_fields))
@attr('shard_1')
class ContentLicenseTest(StudioCourseTest):
    """
    Tests for course-level licensing (that is, setting the license,
    for an entire course's content, to All Rights Reserved or Creative Commons).

    Each test sets the license in Studio's settings page and verifies the
    value shown on the LMS courseware page.
    """
    def setUp(self):  # pylint: disable=arguments-differ
        super(ContentLicenseTest, self).setUp()
        self.outline_page = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.settings_page = SettingsPage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.lms_courseware = CoursewarePage(
            self.browser,
            self.course_id,
        )
        self.settings_page.visit()

    def test_empty_license(self):
        """
        When I visit the Studio settings page,
        I see that the course license is "All Rights Reserved" by default.
        Then I visit the LMS courseware page,
        and I see that the default course license is displayed.
        """
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")

    def test_arr_license(self):
        """
        When I visit the Studio settings page,
        and I set the course license to "All Rights Reserved",
        and I refresh the page,
        I see that the course license is "All Rights Reserved".
        Then I visit the LMS courseware page,
        and I see that the course license is "All Rights Reserved".
        """
        self.settings_page.course_license = "All Rights Reserved"
        self.settings_page.save_changes()
        # Reload to confirm the value persisted rather than only displayed.
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "All Rights Reserved")
        self.lms_courseware.visit()
        self.assertEqual(self.lms_courseware.course_license, "© All Rights Reserved")

    def test_cc_license(self):
        """
        When I visit the Studio settings page,
        and I set the course license to "Creative Commons",
        and I refresh the page,
        I see that the course license is "Creative Commons".
        Then I visit the LMS courseware page,
        and I see that the course license is "Some Rights Reserved".
        """
        self.settings_page.course_license = "Creative Commons"
        self.settings_page.save_changes()
        self.settings_page.refresh_and_wait_for_load()
        self.assertEqual(self.settings_page.course_license, "Creative Commons")
        self.lms_courseware.visit()
        # The course_license text will include a bunch of screen reader text to explain
        # the selected options
        self.assertIn("Some Rights Reserved", self.lms_courseware.course_license)
|
agpl-3.0
|
sagersmith8/ai_graph_coloring
|
ai_graph_color/algorithms/min_conflicts.py
|
1
|
3360
|
import random
params = {}
def run(graph, setup, params):
    """
    Color a graph using min-conflicts.

    First generate a random coloring for the graph. Until there are no
    conflicts, pick a random node and recolor it with the color that
    minimizes the number of conflicting edges at that node.

    This is a generator: it yields the running total of conflicting edges
    after each recoloring step, and additionally whenever
    ``setup.counter.increment()`` returns a truthy value.

    :param graph: the graph to color, in adjacency-list form
    :type graph: list[list[int]]
    :param setup: run context providing ``logger`` (logging-style) and
        ``counter`` (with an ``increment()`` method) — assumed interface,
        inferred from usage here
    :param params: algorithm parameters; ``params['colors']`` is the
        number of colors to color the graph with
    :type params: dict
    """
    num_colors = params['colors']
    # Materialize as a list: on Python 3, range() is an immutable sequence,
    # so the .remove() call below would raise AttributeError otherwise.
    # On Python 2 this is identical to the original behavior.
    colors = list(range(num_colors))
    setup.logger.debug(
        'Trying to color min-conflicts with %s colors', num_colors
    )
    coloring = [random.choice(colors) for _ in graph]
    setup.logger.debug('Initial coloring: %s', coloring)

    num_conflicts = num_conflicts_graph(graph, coloring)
    setup.logger.debug('Initial conflicts: %s', num_conflicts)
    if setup.counter.increment():
        yield num_conflicts

    while num_conflicts > 0:
        index = random.randint(0, len(graph) - 1)
        setup.logger.debug('Selected node: %s', index)

        initial_conflicts = num_conflicts_node(graph, index, coloring)
        if setup.counter.increment():
            yield num_conflicts

        initial_color = coloring[index]
        min_conflicts = initial_conflicts
        min_conflicts_value = initial_color

        candidates = list(range(num_colors))
        candidates.remove(initial_color)  # don't recheck the same color

        for color in candidates:
            coloring[index] = color
            conflicts = num_conflicts_node(graph, index, coloring)
            if setup.counter.increment():
                yield num_conflicts

            if conflicts < min_conflicts:
                min_conflicts = conflicts
                min_conflicts_value = color

        coloring[index] = min_conflicts_value
        setup.logger.debug('Updated coloring: %s', coloring)

        # The graph-wide total only changes by the delta at the touched node.
        num_conflicts -= initial_conflicts - min_conflicts
        setup.logger.debug('Updated conflicts: %s', num_conflicts)
        yield num_conflicts
def num_conflicts_graph(graph, coloring):
    """
    Count the conflicting edges in *graph* under *coloring*.

    An edge conflicts when both endpoints carry the same color. Each
    undirected edge is counted once (only when the from-node index is
    smaller than the to-node index).

    :param graph: the graph, in adjacency list form
    :type graph: list[list[int]]
    :param coloring: the coloring of the graph
    :type coloring: list[int]
    :rtype: int
    :return: the number of conflicting edges for the coloring of the
        given graph.
    """
    return sum(
        1
        for node, neighbours in enumerate(graph)
        for neighbour in neighbours
        if node < neighbour and coloring[node] == coloring[neighbour]
    )
def num_conflicts_node(graph, index, coloring):
    """
    Count the conflicting edges incident to one node.

    :param graph: a graph in adjacency list form
    :type graph: list[list[int]]
    :param index: the index of the node in the graph
    :type index: int
    :param coloring: the coloring of the graph
    :type coloring: list[int]
    :return: the number of edges from node *index* whose other endpoint
        has the same color as node *index*
    """
    node_color = coloring[index]
    return sum(1 for neighbour in graph[index] if coloring[neighbour] == node_color)
|
mit
|
camptocamp/ngo-addons-backport
|
addons/account/wizard/account_move_line_reconcile_select.py
|
56
|
2399
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_line_reconcile_select(osv.osv_memory):
    """Transient wizard that asks for a reconcilable account and then opens
    the journal-items list filtered to that account's unreconciled,
    non-draft move lines."""
    _name = "account.move.line.reconcile.select"
    _description = "Move line reconcile select"
    _columns = {
        # Only accounts flagged as reconcilable can be chosen.
        'account_id': fields.many2one('account.account', 'Account', \
                domain = [('reconcile', '=', 1)], required=True),
    }

    def action_open_window(self, cr, uid, ids, context=None):
        """
        Open the account move line window for reconciliation on the
        account selected in this wizard.

        :param cr: the current row, from the database cursor
        :param uid: the current user's ID for security checks
        :param ids: this wizard record's ID or list of IDs
        :param context: optional context dictionary
        :return: an ir.actions.act_window dictionary whose domain limits
            the view to unreconciled, posted lines of the chosen account
        """
        data = self.read(cr, uid, ids, context=context)[0]
        return {
            'domain': "[('account_id','=',%d),('reconcile_id','=',False),('state','<>','draft')]" % data['account_id'],
            'name': _('Reconciliation'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window'
        }

# Instantiation registers the model with the ORM (pre-v8 OpenERP style).
account_move_line_reconcile_select()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/models/editor_subscribe_label_deleted.py
|
1
|
1251
|
"""
.. module:: editor_subscribe_label_deleted
The **Editor Subscribe Label Deleted** Model.
PostgreSQL Definition
---------------------
The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE editor_subscribe_label_deleted
(
editor INTEGER NOT NULL, -- PK, references editor.id
gid UUID NOT NULL, -- PK, references deleted_entity.gid
deleted_by INTEGER NOT NULL -- references edit.id
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
    """
    Django mapping of the MusicBrainz ``editor_subscribe_label_deleted`` table.

    Not all parameters are listed here, only those that present some interest
    in their Django implementation.

    :param editor: references :class:`.editor`
    :param gid: references :class:`.deleted_entity`
    :param deleted_by: references :class:`.edit`
    """
    # In PostgreSQL the PK is composite (editor, gid); Django cannot express
    # composite primary keys, so only `editor` carries primary_key=True here.
    editor = models.OneToOneField('editor', primary_key=True)
    gid = models.OneToOneField('deleted_entity')
    deleted_by = models.ForeignKey('edit')
    def __str__(self):
        # Fixed display label; rows carry no human-readable name of their own.
        return 'Editor Subscribe Label Deleted'
    class Meta:
        # Keep the exact MusicBrainz table name instead of Django's default.
        db_table = 'editor_subscribe_label_deleted'
|
gpl-2.0
|
legnaleurc/tornado
|
demos/facebook/facebook.py
|
12
|
4412
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
# Command-line flags: the HTTP listen port plus the Facebook application
# credentials (API key/secret pair issued for the registered app).
define("port", default=8888, help="run on the given port", type=int)
define("facebook_api_key", help="your Facebook application API key", type=str)
define("facebook_secret", help="your Facebook application secret", type=str)
class Application(tornado.web.Application):
    """Tornado application wiring three routes to app-wide settings."""

    def __init__(self):
        routes = [
            (r"/", MainHandler),
            (r"/auth/login", AuthLoginHandler),
            (r"/auth/logout", AuthLogoutHandler),
        ]
        here = os.path.dirname(__file__)
        settings = {
            # NOTE: placeholder secret -- replace before deploying.
            "cookie_secret": "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
            "login_url": "/auth/login",
            "template_path": os.path.join(here, "templates"),
            "static_path": os.path.join(here, "static"),
            "xsrf_cookies": True,
            "facebook_api_key": options.facebook_api_key,
            "facebook_secret": options.facebook_secret,
            # Exposes {% module Post(...) %} to the templates.
            "ui_modules": {"Post": PostModule},
            "debug": True,
            "autoescape": None,
        }
        tornado.web.Application.__init__(self, routes, **settings)
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler: resolves the current user from the login cookie."""

    def get_current_user(self):
        # The signed cookie holds the JSON-encoded Facebook user record.
        cookie = self.get_secure_cookie("fbdemo_user")
        if cookie:
            return tornado.escape.json_decode(cookie)
        return None
class MainHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Fetches and renders the logged-in user's Facebook home stream."""

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        token = self.current_user["access_token"]
        self.facebook_request("/me/home", self._on_stream, access_token=token)

    def _on_stream(self, stream):
        if stream is None:
            # A None payload signals an expired session; force a re-login.
            self.redirect("/auth/login")
        else:
            self.render("stream.html", stream=stream)
class AuthLoginHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    """Drives the two-step OAuth handshake against Facebook."""

    @tornado.web.asynchronous
    def get(self):
        next_arg = tornado.escape.url_escape(self.get_argument("next", "/"))
        my_url = "%s://%s/auth/login?next=%s" % (
            self.request.protocol, self.request.host, next_arg)
        if not self.get_argument("code", False):
            # Step 1: no auth code yet -- redirect to Facebook's consent page.
            self.authorize_redirect(redirect_uri=my_url,
                                    client_id=self.settings["facebook_api_key"],
                                    extra_params={"scope": "user_posts"})
            return
        # Step 2: exchange the code for an access token, then _on_auth runs.
        self.get_authenticated_user(
            redirect_uri=my_url,
            client_id=self.settings["facebook_api_key"],
            client_secret=self.settings["facebook_secret"],
            code=self.get_argument("code"),
            callback=self._on_auth)

    def _on_auth(self, user):
        if not user:
            raise tornado.web.HTTPError(500, "Facebook auth failed")
        # Persist the user record in a signed cookie, then continue.
        self.set_secure_cookie("fbdemo_user", tornado.escape.json_encode(user))
        self.redirect(self.get_argument("next", "/"))
class AuthLogoutHandler(BaseHandler, tornado.auth.FacebookGraphMixin):
    # Logout is purely local: drop the signed login cookie and bounce the
    # browser back to wherever the "next" argument points (default "/").
    def get(self):
        self.clear_cookie("fbdemo_user")
        self.redirect(self.get_argument("next", "/"))
class PostModule(tornado.web.UIModule):
    # Registered as "Post" in Application's ui_modules; renders one stream
    # entry through its own template fragment.
    def render(self, post):
        return self.render_string("modules/post.html", post=post)
def main():
    """Parse command-line flags, require the app credentials, and serve."""
    tornado.options.parse_command_line()
    have_creds = options.facebook_api_key and options.facebook_secret
    if not have_creds:
        print("--facebook_api_key and --facebook_secret must be set")
        return
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(options.port)
    # Blocks forever, dispatching requests on the current IOLoop.
    tornado.ioloop.IOLoop.current().start()
# Standard entry guard: start the server only when run as a script.
if __name__ == "__main__":
    main()
|
apache-2.0
|
Vrturo/FrontEndLabz
|
techTutorials/webpack/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
1446
|
65937
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
  def setUp(self):
    # Each validator call writes its warnings to this in-memory stream;
    # _ExpectedWarnings rewinds and inspects it afterwards.
    self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
  def testValidateMSVSSettings_settings(self):
    """Tests that for invalid MSVS settings."""
    # One dict per MSVS tool.  Most entries are valid; a handful are
    # deliberately wrong (unknown names like ZZXYZ/notgood, out-of-range
    # indices, a malformed bool 'truel') and each must yield exactly one of
    # the warnings asserted at the bottom.
    MSVSSettings.ValidateMSVSSettings(
        {'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': '0',
            'BasicRuntimeChecks': '5',
            'BrowseInformation': 'fdkslj',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'CallingConvention': '-1',
            'CompileAs': '1',
            'DebugInformationFormat': '2',
            'DefaultCharIsUnsigned': 'true',
            'Detect64BitPortabilityProblems': 'true',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': '1',
            'EnableFiberSafeOptimizations': 'true',
            'EnableFunctionLevelLinking': 'true',
            'EnableIntrinsicFunctions': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',
            'ErrorReporting': '1',
            'ExceptionHandling': '1',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': '1',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': '1',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'GeneratePreprocessedFile': '1',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': '1',
            'KeepComments': 'true',
            'MinimalRebuild': 'true',
            'ObjectFile': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMP': 'true',
            'Optimization': '1',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderThrough': 'a_file_name',
            'PreprocessorDefinitions': 'string1;string2',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': '1',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UsePrecompiledHeader': '1',
            'UseUnicodeResponseFiles': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},
         'VCLinkerTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalDependencies_excluded': 'file3',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '2',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'CLRImageType': '2',
            'CLRThreadAttribute': '2',
            'CLRUnmanagedCodeCheck': 'true',
            'DataExecutionPrevention': '2',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': '2',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': '2',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'ErrorReporting': '2',
            'FixedBaseAddress': '2',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateManifest': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreDefaultLibraryNames': 'file1;file2',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreImportLibrary': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': '2',
            'LinkIncremental': '2',
            'LinkLibraryDependencies': 'true',
            'LinkTimeCodeGeneration': '2',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'ModuleDefinitionFile': 'a_file_name',
            'OptimizeForWindows98': '1',
            'OptimizeReferences': '2',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': '2',
            'RegisterOutput': 'true',
            'ResourceOnlyDLL': 'true',
            'SetChecksum': 'true',
            'ShowProgress': '2',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': '2',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNet': 'true',
            'TargetMachine': '2',
            'TerminalServerAware': '2',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': '2',
            'UACUIAccess': 'true',
            'UseLibraryDependencyInputs': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Version': 'a string1'},
         'VCMIDLTool': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': '1',
            'DLLDataFileName': 'a_file_name',
            'EnableErrorChecks': '1',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'MkTypLibCompatible': 'true',
            'notgood': 'bogus',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'StructMemberAlignment': '1',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': '1',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'VCResourceCompilerTool': {
            'AdditionalOptions': 'a string1',
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'Culture': '1003',
            'IgnoreStandardIncludePath': 'true',
            'notgood2': 'bogus',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'VCLibrarianTool': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkLibraryDependencies': 'true',
            'ModuleDefinitionFile': 'a_file_name',
            'OutputFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'UseUnicodeResponseFiles': 'true'},
         'VCManifestTool': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'DependencyInformationFile': 'a_file_name',
            'GenerateCatalogFiles': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestResourceFile': 'a_file_name',
            'OutputManifestFile': 'a_file_name',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressStartupBanner': 'true',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'truel',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'UseFAT32Workaround': 'true',
            'UseUnicodeResponseFiles': 'true',
            'VerboseOutput': 'true'}},
        self.stderr)
    # Exactly one warning per deliberately-bad entry above, nothing else.
    self._ExpectedWarnings([
        'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
        'index value (5) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/BrowseInformation, '
        "invalid literal for int() with base 10: 'fdkslj'",
        'Warning: for VCCLCompilerTool/CallingConvention, '
        'index value (-1) not in expected range [0, 4)',
        'Warning: for VCCLCompilerTool/DebugInformationFormat, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
        'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
        'Warning: for VCLinkerTool/TargetMachine, '
        'converted value for 2 not specified.',
        'Warning: unrecognized setting VCMIDLTool/notgood',
        'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
        'Warning: for VCManifestTool/UpdateFileHashes, '
        "expected bool; got 'truel'"
        ''])
  def testValidateMSBuildSettings_settings(self):
    """Tests that for invalid MSBuild settings."""
    # MSBuild counterpart of the MSVS validation test: tool sections use the
    # MSBuild names (ClCompile, Link, ...) and enum values are symbolic
    # strings rather than numeric indices.  Bad entries (unknown names,
    # malformed bools 'truel'/'fal', bogus enum 'MachineX86i') must each
    # produce one warning.
    MSVSSettings.ValidateMSBuildSettings(
        {'ClCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': ['string1', 'string2'],
            'AdditionalUsingDirectories': 'folder1;folder2',
            'AssemblerListingLocation': 'a_file_name',
            'AssemblerOutput': 'NoListing',
            'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
            'BrowseInformation': 'false',
            'BrowseInformationFile': 'a_file_name',
            'BufferSecurityCheck': 'true',
            'BuildingInIDE': 'true',
            'CallingConvention': 'Cdecl',
            'CompileAs': 'CompileAsC',
            'CompileAsManaged': 'true',
            'CreateHotpatchableImage': 'true',
            'DebugInformationFormat': 'ProgramDatabase',
            'DisableLanguageExtensions': 'true',
            'DisableSpecificWarnings': 'string1;string2',
            'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
            'EnableFiberSafeOptimizations': 'true',
            'EnablePREfast': 'true',
            'Enableprefast': 'bogus',
            'ErrorReporting': 'Prompt',
            'ExceptionHandling': 'SyncCThrow',
            'ExpandAttributedSource': 'true',
            'FavorSizeOrSpeed': 'Neither',
            'FloatingPointExceptions': 'true',
            'FloatingPointModel': 'Precise',
            'ForceConformanceInForLoopScope': 'true',
            'ForcedIncludeFiles': 'file1;file2',
            'ForcedUsingFiles': 'file1;file2',
            'FunctionLevelLinking': 'false',
            'GenerateXMLDocumentationFiles': 'true',
            'IgnoreStandardIncludePath': 'true',
            'InlineFunctionExpansion': 'OnlyExplicitInline',
            'IntrinsicFunctions': 'false',
            'MinimalRebuild': 'true',
            'MultiProcessorCompilation': 'true',
            'ObjectFileName': 'a_file_name',
            'OmitDefaultLibName': 'true',
            'OmitFramePointers': 'true',
            'OpenMPSupport': 'true',
            'Optimization': 'Disabled',
            'PrecompiledHeader': 'NotUsing',
            'PrecompiledHeaderFile': 'a_file_name',
            'PrecompiledHeaderOutputFile': 'a_file_name',
            'PreprocessKeepComments': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'PreprocessOutputPath': 'a string1',
            'PreprocessSuppressLineNumbers': 'false',
            'PreprocessToFile': 'false',
            'ProcessorNumber': '33',
            'ProgramDataBaseFileName': 'a_file_name',
            'RuntimeLibrary': 'MultiThreaded',
            'RuntimeTypeInfo': 'true',
            'ShowIncludes': 'true',
            'SmallerTypeCheck': 'true',
            'StringPooling': 'true',
            'StructMemberAlignment': '1Byte',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TreatSpecificWarningsAsErrors': 'string1;string2',
            'TreatWarningAsError': 'true',
            'TreatWChar_tAsBuiltInType': 'true',
            'UndefineAllPreprocessorDefinitions': 'true',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'UseFullPaths': 'true',
            'UseUnicodeForAssemblerListing': 'true',
            'WarningLevel': 'TurnOffAllWarnings',
            'WholeProgramOptimization': 'true',
            'XMLDocumentationFileName': 'a_file_name',
            'ZZXYZ': 'bogus'},
         'Link': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalManifestDependencies': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AddModuleNamesToAssembly': 'file1;file2',
            'AllowIsolation': 'true',
            'AssemblyDebug': '',
            'AssemblyLinkResource': 'file1;file2',
            'BaseAddress': 'a string1',
            'BuildingInIDE': 'true',
            'CLRImageType': 'ForceIJWImage',
            'CLRSupportLastError': 'Enabled',
            'CLRThreadAttribute': 'MTAThreadingAttribute',
            'CLRUnmanagedCodeCheck': 'true',
            'CreateHotPatchableImage': 'X86Image',
            'DataExecutionPrevention': 'false',
            'DelayLoadDLLs': 'file1;file2',
            'DelaySign': 'true',
            'Driver': 'NotSet',
            'EmbedManagedResourceFile': 'file1;file2',
            'EnableCOMDATFolding': 'false',
            'EnableUAC': 'true',
            'EntryPointSymbol': 'a string1',
            'FixedBaseAddress': 'false',
            'ForceFileOutput': 'Enabled',
            'ForceSymbolReferences': 'file1;file2',
            'FunctionOrder': 'a_file_name',
            'GenerateDebugInformation': 'true',
            'GenerateMapFile': 'true',
            'HeapCommitSize': 'a string1',
            'HeapReserveSize': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreEmbeddedIDL': 'true',
            'IgnoreSpecificDefaultLibraries': 'a_file_list',
            'ImageHasSafeExceptionHandlers': 'true',
            'ImportLibrary': 'a_file_name',
            'KeyContainer': 'a_file_name',
            'KeyFile': 'a_file_name',
            'LargeAddressAware': 'false',
            'LinkDLL': 'true',
            'LinkErrorReporting': 'SendErrorReport',
            'LinkStatus': 'true',
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'ManifestFile': 'a_file_name',
            'MapExports': 'true',
            'MapFileName': 'a_file_name',
            'MergedIDLBaseFileName': 'a_file_name',
            'MergeSections': 'a string1',
            'MidlCommandFile': 'a_file_name',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'MSDOSStubFileName': 'a_file_name',
            'NoEntryPoint': 'true',
            'OptimizeReferences': 'false',
            'OutputFile': 'a_file_name',
            'PerUserRedirection': 'true',
            'PreventDllBinding': 'true',
            'Profile': 'true',
            'ProfileGuidedDatabase': 'a_file_name',
            'ProgramDatabaseFile': 'a_file_name',
            'RandomizedBaseAddress': 'false',
            'RegisterOutput': 'true',
            'SectionAlignment': '33',
            'SetChecksum': 'true',
            'ShowProgress': 'LinkVerboseREF',
            'SpecifySectionAttributes': 'a string1',
            'StackCommitSize': 'a string1',
            'StackReserveSize': 'a string1',
            'StripPrivateSymbols': 'a_file_name',
            'SubSystem': 'Console',
            'SupportNobindOfDelayLoadedDLL': 'true',
            'SupportUnloadOfDelayLoadedDLL': 'true',
            'SuppressStartupBanner': 'true',
            'SwapRunFromCD': 'true',
            'SwapRunFromNET': 'true',
            'TargetMachine': 'MachineX86',
            'TerminalServerAware': 'false',
            'TrackerLogDirectory': 'a_folder',
            'TreatLinkerWarningAsErrors': 'true',
            'TurnOffAssemblyGeneration': 'true',
            'TypeLibraryFile': 'a_file_name',
            'TypeLibraryResourceID': '33',
            'UACExecutionLevel': 'AsInvoker',
            'UACUIAccess': 'true',
            'Version': 'a string1'},
         'ResourceCompile': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'Culture': '0x236',
            'IgnoreStandardIncludePath': 'true',
            'NullTerminateStrings': 'true',
            'PreprocessorDefinitions': 'string1;string2',
            'ResourceOutputFileName': 'a string1',
            'ShowProgress': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'UndefinePreprocessorDefinitions': 'string1;string2'},
         'Midl': {
            'AdditionalIncludeDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'ApplicationConfigurationMode': 'true',
            'ClientStubFile': 'a_file_name',
            'CPreprocessOptions': 'a string1',
            'DefaultCharType': 'Signed',
            'DllDataFileName': 'a_file_name',
            'EnableErrorChecks': 'EnableCustom',
            'ErrorCheckAllocations': 'true',
            'ErrorCheckBounds': 'true',
            'ErrorCheckEnumRange': 'true',
            'ErrorCheckRefPointers': 'true',
            'ErrorCheckStubData': 'true',
            'GenerateClientFiles': 'Stub',
            'GenerateServerFiles': 'None',
            'GenerateStublessProxies': 'true',
            'GenerateTypeLibrary': 'true',
            'HeaderFileName': 'a_file_name',
            'IgnoreStandardIncludePath': 'true',
            'InterfaceIdentifierFileName': 'a_file_name',
            'LocaleID': '33',
            'MkTypLibCompatible': 'true',
            'OutputDirectory': 'a string1',
            'PreprocessorDefinitions': 'string1;string2',
            'ProxyFileName': 'a_file_name',
            'RedirectOutputAndErrors': 'a_file_name',
            'ServerStubFile': 'a_file_name',
            'StructMemberAlignment': 'NotSet',
            'SuppressCompilerWarnings': 'true',
            'SuppressStartupBanner': 'true',
            'TargetEnvironment': 'Itanium',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibFormat': 'NewFormat',
            'TypeLibraryName': 'a_file_name',
            'UndefinePreprocessorDefinitions': 'string1;string2',
            'ValidateAllParameters': 'true',
            'WarnAsError': 'true',
            'WarningLevel': '1'},
         'Lib': {
            'AdditionalDependencies': 'file1;file2',
            'AdditionalLibraryDirectories': 'folder1;folder2',
            'AdditionalOptions': 'a string1',
            'DisplayLibrary': 'a string1',
            'ErrorReporting': 'PromptImmediately',
            'ExportNamedFunctions': 'string1;string2',
            'ForceSymbolReferences': 'a string1',
            'IgnoreAllDefaultLibraries': 'true',
            'IgnoreSpecificDefaultLibraries': 'file1;file2',
            'LinkTimeCodeGeneration': 'true',
            'MinimumRequiredVersion': 'a string1',
            'ModuleDefinitionFile': 'a_file_name',
            'Name': 'a_file_name',
            'OutputFile': 'a_file_name',
            'RemoveObjects': 'file1;file2',
            'SubSystem': 'Console',
            'SuppressStartupBanner': 'true',
            'TargetMachine': 'MachineX86i',
            'TrackerLogDirectory': 'a_folder',
            'TreatLibWarningAsErrors': 'true',
            'UseUnicodeResponseFiles': 'true',
            'Verbose': 'true'},
         'Manifest': {
            'AdditionalManifestFiles': 'file1;file2',
            'AdditionalOptions': 'a string1',
            'AssemblyIdentity': 'a string1',
            'ComponentFileName': 'a_file_name',
            'EnableDPIAwareness': 'fal',
            'GenerateCatalogFiles': 'truel',
            'GenerateCategoryTags': 'true',
            'InputResourceManifests': 'a string1',
            'ManifestFromManagedAssembly': 'a_file_name',
            'notgood3': 'bogus',
            'OutputManifestFile': 'a_file_name',
            'OutputResourceManifests': 'a string1',
            'RegistrarScriptFile': 'a_file_name',
            'ReplacementsFile': 'a_file_name',
            'SuppressDependencyElement': 'true',
            'SuppressStartupBanner': 'true',
            'TrackerLogDirectory': 'a_folder',
            'TypeLibraryFile': 'a_file_name',
            'UpdateFileHashes': 'true',
            'UpdateFileHashesSearchPath': 'a_file_name',
            'VerboseOutput': 'true'},
         'ProjectReference': {
            'LinkLibraryDependencies': 'true',
            'UseLibraryDependencyInputs': 'true'},
         'ManifestResourceCompile': {
            'ResourceOutputFileName': 'a_file_name'},
         '': {
            'EmbedManifest': 'true',
            'GenerateManifest': 'true',
            'IgnoreImportLibrary': 'true',
            'LinkIncremental': 'false'}},
        self.stderr)
    # Exactly one warning per deliberately-bad entry above, nothing else.
    self._ExpectedWarnings([
        'Warning: unrecognized setting ClCompile/Enableprefast',
        'Warning: unrecognized setting ClCompile/ZZXYZ',
        'Warning: unrecognized setting Manifest/notgood3',
        'Warning: for Manifest/GenerateCatalogFiles, '
        "expected bool; got 'truel'",
        'Warning: for Lib/TargetMachine, unrecognized enumerated value '
        'MachineX86i',
        "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_minimal(self):
    """Tests a minimal conversion."""
    # Representative sample: pass-through strings, an index-mapped enum
    # (BasicRuntimeChecks '0' -> 'Default'), and linker options that are
    # renamed and/or turned into symbolic/boolean values.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/foo',
            'BasicRuntimeChecks': '0',
        },
        'VCLinkerTool': {
            'LinkTimeCodeGeneration': '1',
            'ErrorReporting': '1',
            'DataExecutionPrevention': '2',
        },
    }
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': 'dir1',
            'AdditionalOptions': '/foo',
            'BasicRuntimeChecks': 'Default',
        },
        'Link': {
            'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
            'LinkErrorReporting': 'PromptImmediately',
            'DataExecutionPrevention': 'true',
        },
    }
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([])
  def testConvertToMSBuildSettings_warnings(self):
    """Tests conversion that generates warnings."""
    # Bad values must be dropped from the converted output (note the bad
    # ClCompile/Link entries are absent below) while each one produces a
    # descriptive warning; the valid 'Culture' value is still converted.
    msvs_settings = {
        'VCCLCompilerTool': {
            'AdditionalIncludeDirectories': '1',
            'AdditionalOptions': '2',
            # These are incorrect values:
            'BasicRuntimeChecks': '12',
            'BrowseInformation': '21',
            'UsePrecompiledHeader': '13',
            'GeneratePreprocessedFile': '14'},
        'VCLinkerTool': {
            # These are incorrect values:
            'Driver': '10',
            'LinkTimeCodeGeneration': '31',
            'ErrorReporting': '21',
            'FixedBaseAddress': '6'},
        'VCResourceCompilerTool': {
            # Custom
            'Culture': '1003'}}
    expected_msbuild_settings = {
        'ClCompile': {
            'AdditionalIncludeDirectories': '1',
            'AdditionalOptions': '2'},
        'Link': {},
        'ResourceCompile': {
            # Custom
            'Culture': '0x03eb'}}
    actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
        msvs_settings,
        self.stderr)
    self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
    self._ExpectedWarnings([
        'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
        'MSBuild, index value (12) not in expected range [0, 4)',
        'Warning: while converting VCCLCompilerTool/BrowseInformation to '
        'MSBuild, index value (21) not in expected range [0, 3)',
        'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
        'MSBuild, index value (13) not in expected range [0, 3)',
        'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
        'MSBuild, value must be one of [0, 1, 2]; got 14',
        'Warning: while converting VCLinkerTool/Driver to '
        'MSBuild, index value (10) not in expected range [0, 4)',
        'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
        'MSBuild, index value (31) not in expected range [0, 5)',
        'Warning: while converting VCLinkerTool/ErrorReporting to '
        'MSBuild, index value (21) not in expected range [0, 3)',
        'Warning: while converting VCLinkerTool/FixedBaseAddress to '
        'MSBuild, index value (6) not in expected range [0, 3)',
        ])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
%(AdditionalIncludeDirectories) to make sure than inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
|
mit
|
SunghanKim/numpy
|
numpy/_import_tools.py
|
93
|
13292
|
from __future__ import division, absolute_import, print_function
import os
import sys
import warnings
__all__ = ['PackageLoader']
class PackageLoader(object):
def __init__(self, verbose=False, infunc=False):
""" Manages loading packages.
"""
if infunc:
_level = 2
else:
_level = 1
self.parent_frame = frame = sys._getframe(_level)
self.parent_name = eval('__name__', frame.f_globals, frame.f_locals)
parent_path = eval('__path__', frame.f_globals, frame.f_locals)
if isinstance(parent_path, str):
parent_path = [parent_path]
self.parent_path = parent_path
if '__all__' not in frame.f_locals:
exec('__all__ = []', frame.f_globals, frame.f_locals)
self.parent_export_names = eval('__all__', frame.f_globals, frame.f_locals)
self.info_modules = {}
self.imported_packages = []
self.verbose = None
def _get_info_files(self, package_dir, parent_path, parent_package=None):
""" Return list of (package name,info.py file) from parent_path subdirectories.
"""
from glob import glob
files = glob(os.path.join(parent_path, package_dir, 'info.py'))
for info_file in glob(os.path.join(parent_path, package_dir, 'info.pyc')):
if info_file[:-1] not in files:
files.append(info_file)
info_files = []
for info_file in files:
package_name = os.path.dirname(info_file[len(parent_path)+1:])\
.replace(os.sep, '.')
if parent_package:
package_name = parent_package + '.' + package_name
info_files.append((package_name, info_file))
info_files.extend(self._get_info_files('*',
os.path.dirname(info_file),
package_name))
return info_files
def _init_info_modules(self, packages=None):
"""Initialize info_modules = {<package_name>: <package info.py module>}.
"""
import imp
info_files = []
info_modules = self.info_modules
if packages is None:
for path in self.parent_path:
info_files.extend(self._get_info_files('*', path))
else:
for package_name in packages:
package_dir = os.path.join(*package_name.split('.'))
for path in self.parent_path:
names_files = self._get_info_files(package_dir, path)
if names_files:
info_files.extend(names_files)
break
else:
try:
exec('import %s.info as info' % (package_name))
info_modules[package_name] = info
except ImportError as msg:
self.warn('No scipy-style subpackage %r found in %s. '\
'Ignoring: %s'\
% (package_name, ':'.join(self.parent_path), msg))
for package_name, info_file in info_files:
if package_name in info_modules:
continue
fullname = self.parent_name +'.'+ package_name
if info_file[-1]=='c':
filedescriptor = ('.pyc', 'rb', 2)
else:
filedescriptor = ('.py', 'U', 1)
try:
info_module = imp.load_module(fullname+'.info',
open(info_file, filedescriptor[1]),
info_file,
filedescriptor)
except Exception as msg:
self.error(msg)
info_module = None
if info_module is None or getattr(info_module, 'ignore', False):
info_modules.pop(package_name, None)
else:
self._init_info_modules(getattr(info_module, 'depends', []))
info_modules[package_name] = info_module
return
def _get_sorted_names(self):
""" Return package names sorted in the order as they should be
imported due to dependence relations between packages.
"""
depend_dict = {}
for name, info_module in self.info_modules.items():
depend_dict[name] = getattr(info_module, 'depends', [])
package_names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
package_names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
package_names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return package_names
def __call__(self, *packages, **options):
    """Load one or more packages into parent package top-level namespace.

    This function is intended to shorten the need to import many
    subpackages, say of scipy, constantly with statements such as

      import scipy.linalg, scipy.fftpack, scipy.etc...

    Instead, you can say:

      import scipy
      scipy.pkgload('linalg','fftpack',...)

    or

      scipy.pkgload()

    to load all of them in one call.

    If a name which doesn't exist in scipy's namespace is
    given, a warning is shown.

    Parameters
    ----------
    *packages : arg-tuple
         the names (one or more strings) of all the modules one
         wishes to load into the top-level namespace.
    verbose= : integer
         verbosity level [default: -1].
         verbose=-1 will suspend also warnings.
    force= : bool
         when True, force reloading loaded packages [default: False].
    postpone= : bool
         when True, don't load packages [default: False]
    """
    # 2014-10-29, 1.10
    warnings.warn('pkgload and PackageLoader are obsolete '
                  'and will be removed in a future version of numpy',
                  DeprecationWarning)
    frame = self.parent_frame
    self.info_modules = {}
    if options.get('force', False):
        # Forget previously imported packages so they get re-imported below.
        self.imported_packages = []
    self.verbose = verbose = options.get('verbose', -1)
    postpone = options.get('postpone', None)
    self._init_info_modules(packages or None)
    self.log('Imports to %r namespace\n----------------------------'
             % self.parent_name)
    # Import packages in dependency order so that depends-on packages
    # are present before their dependents.
    for package_name in self._get_sorted_names():
        if package_name in self.imported_packages:
            continue
        info_module = self.info_modules[package_name]
        global_symbols = getattr(info_module, 'global_symbols', [])
        postpone_import = getattr(info_module, 'postpone_import', False)
        # Honour postponement unless the package exports symbols into
        # the parent namespace (those must be loaded eagerly).
        if (postpone and not global_symbols) \
               or (postpone_import and postpone is not None):
            continue
        old_object = frame.f_locals.get(package_name, None)
        cmdstr = 'import '+package_name
        # _execcmd returns True on failure; skip the package in that case.
        if self._execcmd(cmdstr):
            continue
        self.imported_packages.append(package_name)
        if verbose != -1:
            # Warn when the import rebinds an existing name in the caller.
            new_object = frame.f_locals.get(package_name)
            if old_object is not None and old_object is not new_object:
                self.warn('Overwriting %s=%s (was %s)'
                          % (package_name, self._obj2repr(new_object),
                             self._obj2repr(old_object)))
        if '.' not in package_name:
            self.parent_export_names.append(package_name)
        for symbol in global_symbols:
            if symbol == '*':
                # Expand '*' via the package's __all__, falling back to a
                # dir() listing with private names filtered out.
                symbols = eval('getattr(%s,"__all__",None)'
                               % (package_name),
                               frame.f_globals, frame.f_locals)
                if symbols is None:
                    symbols = eval('dir(%s)' % (package_name),
                                   frame.f_globals, frame.f_locals)
                    symbols = [s for s in symbols if not s.startswith('_')]
            else:
                symbols = [symbol]
            if verbose != -1:
                # Snapshot current bindings to detect overwrites afterwards.
                old_objects = {}
                for s in symbols:
                    if s in frame.f_locals:
                        old_objects[s] = frame.f_locals[s]
            cmdstr = 'from '+package_name+' import '+symbol
            if self._execcmd(cmdstr):
                continue
            if verbose != -1:
                for s, old_object in old_objects.items():
                    new_object = frame.f_locals[s]
                    if new_object is not old_object:
                        self.warn('Overwriting %s=%s (was %s)'
                                  % (s, self._obj2repr(new_object),
                                     self._obj2repr(old_object)))
            if symbol == '*':
                self.parent_export_names.extend(symbols)
            else:
                self.parent_export_names.append(symbol)
    return
def _execcmd(self, cmdstr):
""" Execute command in parent_frame."""
frame = self.parent_frame
try:
exec (cmdstr, frame.f_globals, frame.f_locals)
except Exception as msg:
self.error('%s -> failed: %s' % (cmdstr, msg))
return True
else:
self.log('%s -> success' % (cmdstr))
return
def _obj2repr(self, obj):
""" Return repr(obj) with"""
module = getattr(obj, '__module__', None)
file = getattr(obj, '__file__', None)
if module is not None:
return repr(obj) + ' from ' + module
if file is not None:
return repr(obj) + ' from ' + file
return repr(obj)
def log(self, mess):
    """Chatty progress message: emitted to stderr only when verbosity > 1."""
    if self.verbose > 1:
        sys.stderr.write(str(mess) + '\n')

def warn(self, mess):
    """Warning message: emitted to stderr at any non-negative verbosity."""
    if self.verbose >= 0:
        sys.stderr.write(str(mess) + '\n')

def error(self, mess):
    """Error message: emitted to stderr unless verbosity is exactly -1."""
    if self.verbose != -1:
        sys.stderr.write(str(mess) + '\n')
def _get_doc_title(self, info_module):
""" Get the title from a package info.py file.
"""
title = getattr(info_module, '__doc_title__', None)
if title is not None:
return title
title = getattr(info_module, '__doc__', None)
if title is not None:
title = title.lstrip().split('\n', 1)[0]
return title
return '* Not Available *'
def _format_titles(self,titles,colsep='---'):
display_window_width = 70 # How to determine the correct value in runtime??
lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0]
max_length = max(lengths)
lines = []
for (name, title) in titles:
name = name[name.find('.')+1:]
w = max_length - len(name)
words = title.split()
line = '%s%s %s' % (name, w*' ', colsep)
tab = len(line) * ' '
while words:
word = words.pop(0)
if len(line)+len(word)>display_window_width:
lines.append(line)
line = tab
line += ' ' + word
else:
lines.append(line)
return '\n'.join(lines)
def get_pkgdocs(self):
    """ Return documentation summary of subpackages.

    Builds an aligned listing of ``parent.subpackage -- title`` rows;
    packages not yet present in ``sys.modules`` are flagged with
    ``[*]``.  A second section lists global symbols each subpackage
    would export into the parent namespace.
    """
    import sys
    self.info_modules = {}
    # Scan all subpackages (None means "discover everything").
    self._init_info_modules(None)
    titles = []
    symbols = []
    for package_name, info_module in self.info_modules.items():
        global_symbols = getattr(info_module, 'global_symbols', [])
        fullname = self.parent_name + '.' + package_name
        note = ''
        if fullname not in sys.modules:
            # Flag packages that still require an explicit import/pkgload.
            note = ' [*]'
        titles.append((fullname, self._get_doc_title(info_module) + note))
        if global_symbols:
            symbols.append((package_name, ', '.join(global_symbols)))
    retstr = self._format_titles(titles) + \
             '\n [*] - using a package requires explicit import (see pkgload)'
    if symbols:
        retstr += """\n\nGlobal symbols from subpackages"""\
                  """\n-------------------------------\n""" + \
                  self._format_titles(symbols, '-->')
    return retstr
class PackageLoaderDebug(PackageLoader):
    """PackageLoader variant that echoes every command before running it,
    and lets exceptions propagate instead of swallowing them."""

    def _execcmd(self, cmdstr):
        """Execute *cmdstr* in the parent frame, tracing progress to stdout."""
        target = self.parent_frame
        print('Executing', repr(cmdstr), '...', end=' ')
        sys.stdout.flush()
        exec(cmdstr, target.f_globals, target.f_locals)
        print('ok')
        sys.stdout.flush()
        return None
# Opt into the tracing loader when the NUMPY_IMPORT_DEBUG environment
# variable is set to a non-zero integer.
if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')):
    PackageLoader = PackageLoaderDebug
|
bsd-3-clause
|
Moth-Tolias/LetterBoy
|
backend/jumbles nontrobo/jumbles nontrobo - last working.py
|
1
|
3870
|
"""return a jumbled version of a string. eg, the lazy hamster is jumping becomes the lzay hmasetr si jmunipg
shuffles insides of words.
"""
import random
#okay, so this will be the jmuble algorythim
#variables, passed
#string_to_jumble = "" #yeah
#jumble_mode = true # do u switch words of two letters
def string_jumble(string_to_jumble, jumble_mode=True):
    """Return a jumbled copy of *string_to_jumble*.

    Each space-separated word keeps its first and last letter while its
    interior letters are shuffled (e.g. "hamster" -> "hmasetr").  Words
    of three characters or fewer are shuffled whole when *jumble_mode*
    is true, and left untouched otherwise.

    Parameters
    ----------
    string_to_jumble : str
        The sentence to jumble.
    jumble_mode : bool, optional
        Whether short (<= 3 letter) words are shuffled too.

    Returns
    -------
    str
        The jumbled sentence.

    Fixes vs. the original: the result is now returned (the original
    built the string but fell off the end, returning None), words of
    length <= 1 no longer raise IndexError when jumble_mode is False,
    and the leftover debug prints are removed.
    """
    jumbled_words = []
    for word in string_to_jumble.split(' '):
        if len(word) <= 3:
            # Short word: shuffle the whole thing, or keep it verbatim.
            if jumble_mode:
                letters = list(word)
                random.shuffle(letters)
                jumbled_words.append(''.join(letters))
            else:
                jumbled_words.append(word)
        else:
            # Long word: pin first and last letters, shuffle the middle.
            middle = list(word[1:-1])
            random.shuffle(middle)
            jumbled_words.append(word[0] + ''.join(middle) + word[-1])
    return ' '.join(jumbled_words)
# Demo run on a sample sentence.
string_jumble("I think my dog is terribly lazy; he is really quite fat.")
|
gpl-3.0
|
jsaveta/Lance
|
src/rescal/build/lib.linux-x86_64-2.7/setuptools/tests/test_resources.py
|
42
|
24620
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE: the shebang and encoding lines are for ScriptHeaderTests; do not remove
from unittest import TestCase, makeSuite; from pkg_resources import *
from setuptools.command.easy_install import get_script_header, is_sh
import os, pkg_resources, sys, StringIO, tempfile, shutil
# Python 2.3 compatibility: fall back to sets.ImmutableSet where the
# frozenset builtin (added in Python 2.4) does not exist.
try: frozenset
except NameError:
    from sets import ImmutableSet as frozenset
# Maximum repr length before truncation; matches unittest.util in
# Python 2.7, from which safe_repr below was copied.  The original copy
# omitted this constant, so short=True raised NameError.
_MAX_LENGTH = 80

def safe_repr(obj, short=False):
    """ copied from Python2.7

    Return repr(obj) without ever raising: falls back to
    object.__repr__ when the object's own __repr__ fails, and truncates
    the result to _MAX_LENGTH characters when *short* is true.
    """
    try:
        result = repr(obj)
    except Exception:
        result = object.__repr__(obj)
    if not short or len(result) < _MAX_LENGTH:
        return result
    return result[:_MAX_LENGTH] + ' [truncated]...'
class Metadata(EmptyProvider):
    """Mock metadata provider: serves metadata from an in-memory dict,
    as if it came from an on-disk distribution."""

    def __init__(self, *pairs):
        # Each pair is a (filename, contents) tuple.
        self.metadata = dict(pairs)

    def has_metadata(self, name):
        return name in self.metadata

    def get_metadata(self, name):
        return self.metadata[name]

    def get_metadata_lines(self, name):
        # Delegate splitting/comment-stripping to pkg_resources.
        return yield_lines(self.get_metadata(name))
class DistroTests(TestCase):
    """Behaviour of Distribution, Environment and WorkingSet:
    collection/sorting, filename parsing, metadata, requirement
    resolution and the setuptools/distribute co-existence rules."""

    def testCollection(self):
        # empty path should produce no distributions
        ad = Environment([], platform=None, python=None)
        self.assertEqual(list(ad), [])
        self.assertEqual(ad['FooPkg'], [])
        ad.add(Distribution.from_filename("FooPkg-1.3_1.egg"))
        ad.add(Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg"))
        ad.add(Distribution.from_filename("FooPkg-1.2-py2.4.egg"))
        # Name is in there now
        self.assertTrue(ad['FooPkg'])
        # But only 1 package
        self.assertEqual(list(ad), ['foopkg'])
        # Distributions sort by version
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.4', '1.3-1', '1.2']
        )
        # Removing a distribution leaves sequence alone
        ad.remove(ad['FooPkg'][1])
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.4', '1.2']
        )
        # And inserting adds them in order
        ad.add(Distribution.from_filename("FooPkg-1.9.egg"))
        self.assertEqual(
            [dist.version for dist in ad['FooPkg']], ['1.9', '1.4', '1.2']
        )
        ws = WorkingSet([])
        foo12 = Distribution.from_filename("FooPkg-1.2-py2.4.egg")
        foo14 = Distribution.from_filename("FooPkg-1.4-py2.4-win32.egg")
        req, = parse_requirements("FooPkg>=1.3")
        # Nominal case: no distros on path, should yield all applicable
        self.assertEqual(ad.best_match(req, ws).version, '1.9')
        # If a matching distro is already installed, should return only that
        ws.add(foo14); self.assertEqual(ad.best_match(req, ws).version, '1.4')
        # If the first matching distro is unsuitable, it's a version conflict
        ws = WorkingSet([]); ws.add(foo12); ws.add(foo14)
        self.assertRaises(VersionConflict, ad.best_match, req, ws)
        # If more than one match on the path, the first one takes precedence
        ws = WorkingSet([]); ws.add(foo14); ws.add(foo12); ws.add(foo14);
        self.assertEqual(ad.best_match(req, ws).version, '1.4')

    def checkFooPkg(self, d):
        # Shared field assertions for the FooPkg-1.3-1/py2.4/win32 fixture.
        self.assertEqual(d.project_name, "FooPkg")
        self.assertEqual(d.key, "foopkg")
        self.assertEqual(d.version, "1.3-1")
        self.assertEqual(d.py_version, "2.4")
        self.assertEqual(d.platform, "win32")
        self.assertEqual(d.parsed_version, parse_version("1.3-1"))

    def testDistroBasics(self):
        d = Distribution(
            "/some/path",
            project_name="FooPkg", version="1.3-1", py_version="2.4", platform="win32"
        )
        self.checkFooPkg(d)
        # Defaults: current interpreter version, no platform.
        d = Distribution("/some/path")
        self.assertEqual(d.py_version, sys.version[:3])
        self.assertEqual(d.platform, None)

    def testDistroParse(self):
        # Both .egg and .egg-info filenames parse to the same fields.
        d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg")
        self.checkFooPkg(d)
        d = Distribution.from_filename("FooPkg-1.3_1-py2.4-win32.egg-info")
        self.checkFooPkg(d)

    def testDistroMetadata(self):
        # Version may come from PKG-INFO metadata rather than the filename.
        d = Distribution(
            "/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
            metadata=Metadata(
                ('PKG-INFO', "Metadata-Version: 1.0\nVersion: 1.3-1\n")
            )
        )
        self.checkFooPkg(d)

    def distRequires(self, txt):
        # Helper: a distribution whose depends.txt is *txt*.
        return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))

    def checkRequires(self, dist, txt, extras=()):
        self.assertEqual(
            list(dist.requires(extras)),
            list(parse_requirements(txt))
        )

    def testDistroDependsSimple(self):
        for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
            self.checkRequires(self.distRequires(v), v)

    def testResolve(self):
        ad = Environment([]); ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        self.assertEqual(list(ws.resolve([], ad)), [])
        # Request something not in the collection -> DistributionNotFound
        self.assertRaises(
            DistributionNotFound, ws.resolve, parse_requirements("Foo"), ad
        )
        Foo = Distribution.from_filename(
            "/foo_dir/Foo-1.2.egg",
            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
        )
        ad.add(Foo); ad.add(Distribution.from_filename("Foo-0.9.egg"))
        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            self.assertEqual(targets, [Foo])
            map(ws.add, targets)
        self.assertRaises(VersionConflict, ws.resolve,
                          parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([])  # reset
        # Request an extra that causes an unresolved dependency for "Baz"
        self.assertRaises(
            DistributionNotFound, ws.resolve, parse_requirements("Foo[bar]"), ad
        )
        Baz = Distribution.from_filename(
            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
        )
        ad.add(Baz)
        # Activation list now includes resolved dependency
        self.assertEqual(
            list(ws.resolve(parse_requirements("Foo[bar]"), ad)), [Foo, Baz]
        )
        # Requests for conflicting versions produce VersionConflict
        self.assertRaises(VersionConflict,
                          ws.resolve, parse_requirements("Foo==1.2\nFoo!=1.2"), ad
                          )

    def testDistroDependsOptions(self):
        d = self.distRequires("""
            Twisted>=1.5
            [docgen]
            ZConfig>=2.0
            docutils>=0.3
            [fastcgi]
            fcgiapp>=0.1""")
        self.checkRequires(d, "Twisted>=1.5")
        self.checkRequires(
            d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
        )
        self.checkRequires(
            d, "Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
        )
        self.checkRequires(
            d, "Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
            ["docgen", "fastcgi"]
        )
        # Extras order affects the order of the extra requirements.
        self.checkRequires(
            d, "Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
            ["fastcgi", "docgen"]
        )
        self.assertRaises(UnknownExtra, d.requires, ["foo"])

    def testSetuptoolsDistributeCombination(self):
        # Ensure that installing a 0.7-series setuptools fails. PJE says that
        # it will not co-exist.
        ws = WorkingSet([])
        d = Distribution(
            "/some/path",
            project_name="setuptools",
            version="0.7a1")
        self.assertRaises(ValueError, ws.add, d)
        # A 0.6-series is no problem
        d2 = Distribution(
            "/some/path",
            project_name="setuptools",
            version="0.6c9")
        ws.add(d2)
        # a unexisting version needs to work
        ws = WorkingSet([])
        d3 = Distribution(
            "/some/path",
            project_name="setuptools")
        ws.add(d3)
class EntryPointTests(TestCase):
    """EntryPoint construction, string parsing, rejection of malformed
    specs, and group/map parsing.  (Python 2 era: uses dict.iteritems
    and list-returning dict.keys.)"""

    def assertfields(self, ep):
        # Shared field checks used by testBasics and testParse.
        self.assertEqual(ep.name, "foo")
        self.assertEqual(ep.module_name, "setuptools.tests.test_resources")
        self.assertEqual(ep.attrs, ("EntryPointTests",))
        self.assertEqual(ep.extras, ("x",))
        self.assertTrue(ep.load() is EntryPointTests)
        self.assertEqual(
            str(ep),
            "foo = setuptools.tests.test_resources:EntryPointTests [x]"
        )

    def setUp(self):
        self.dist = Distribution.from_filename(
            "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt', '[x]')))

    def testBasics(self):
        ep = EntryPoint(
            "foo", "setuptools.tests.test_resources", ["EntryPointTests"],
            ["x"], self.dist
        )
        self.assertfields(ep)

    def testParse(self):
        s = "foo = setuptools.tests.test_resources:EntryPointTests [x]"
        ep = EntryPoint.parse(s, self.dist)
        self.assertfields(ep)
        # Names may contain spaces; extras are lower-cased.
        ep = EntryPoint.parse("bar baz= spammity[PING]")
        self.assertEqual(ep.name, "bar baz")
        self.assertEqual(ep.module_name, "spammity")
        self.assertEqual(ep.attrs, ())
        self.assertEqual(ep.extras, ("ping",))
        ep = EntryPoint.parse(" fizzly = wocka:foo")
        self.assertEqual(ep.name, "fizzly")
        self.assertEqual(ep.module_name, "wocka")
        self.assertEqual(ep.attrs, ("foo",))
        self.assertEqual(ep.extras, ())

    def testRejects(self):
        # Each of these specs is malformed and must raise ValueError.
        for ep in [
            "foo", "x=1=2", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2",
        ]:
            try: EntryPoint.parse(ep)
            except ValueError: pass
            else: raise AssertionError("Should've been bad", ep)

    def checkSubMap(self, m):
        self.assertEqual(len(m), len(self.submap_expect))
        for key, ep in self.submap_expect.iteritems():
            self.assertEqual(repr(m.get(key)), repr(ep))

    # Expected parse results for submap_str below.
    submap_expect = dict(
        feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
        feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1', 'extra2']),
        feature3=EntryPoint('feature3', 'this.module', extras=['something'])
    )
    submap_str = """
            # define features for blah blah
            feature1 = somemodule:somefunction
            feature2 = another.module:SomeClass [extra1,extra2]
            feature3 = this.module [something]
    """

    def testParseList(self):
        self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
        # Invalid group names and duplicate entry names are rejected.
        self.assertRaises(ValueError, EntryPoint.parse_group, "x a", "foo=bar")
        self.assertRaises(ValueError, EntryPoint.parse_group, "x",
                          ["foo=baz", "foo=bar"])

    def testParseMap(self):
        m = EntryPoint.parse_map({'xyz': self.submap_str})
        self.checkSubMap(m['xyz'])
        self.assertEqual(m.keys(), ['xyz'])
        m = EntryPoint.parse_map("[xyz]\n" + self.submap_str)
        self.checkSubMap(m['xyz'])
        self.assertEqual(m.keys(), ['xyz'])
        # Duplicate sections and section-less text are rejected.
        self.assertRaises(ValueError, EntryPoint.parse_map, ["[xyz]", "[xyz]"])
        self.assertRaises(ValueError, EntryPoint.parse_map, self.submap_str)
class RequirementsTests(TestCase):
    """Requirement parsing, equality/hashing, version containment, and
    the distribute<->setuptools project-name override rules."""

    def testBasics(self):
        r = Requirement.parse("Twisted>=1.2")
        self.assertEqual(str(r), "Twisted>=1.2")
        self.assertEqual(repr(r), "Requirement.parse('Twisted>=1.2')")
        # Equality is case-insensitive on the project name...
        self.assertEqual(r, Requirement("Twisted", [('>=', '1.2')], ()))
        self.assertEqual(r, Requirement("twisTed", [('>=', '1.2')], ()))
        # ...but sensitive to specs, name, and extras.
        self.assertNotEqual(r, Requirement("Twisted", [('>=', '2.0')], ()))
        self.assertNotEqual(r, Requirement("Zope", [('>=', '1.2')], ()))
        self.assertNotEqual(r, Requirement("Zope", [('>=', '3.0')], ()))
        self.assertNotEqual(r, Requirement.parse("Twisted[extras]>=1.2"))

    def testOrdering(self):
        # Spec order does not affect equality; str() is canonicalized.
        r1 = Requirement("Twisted", [('==', '1.2c1'), ('>=', '1.2')], ())
        r2 = Requirement("Twisted", [('>=', '1.2'), ('==', '1.2c1')], ())
        self.assertEqual(r1, r2)
        self.assertEqual(str(r1), str(r2))
        self.assertEqual(str(r2), "Twisted==1.2c1,>=1.2")

    def testBasicContains(self):
        # `in` accepts version strings, parsed versions and Distributions.
        r = Requirement("Twisted", [('>=', '1.2')], ())
        foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
        twist11 = Distribution.from_filename("Twisted-1.1.egg")
        twist12 = Distribution.from_filename("Twisted-1.2.egg")
        self.assertTrue(parse_version('1.2') in r)
        self.assertTrue(parse_version('1.1') not in r)
        self.assertTrue('1.2' in r)
        self.assertTrue('1.1' not in r)
        self.assertTrue(foo_dist not in r)
        self.assertTrue(twist11 not in r)
        self.assertTrue(twist12 in r)

    def testAdvancedContains(self):
        r, = parse_requirements("Foo>=1.2,<=1.3,==1.9,>2.0,!=2.5,<3.0,==4.5")
        for v in ('1.2', '1.2.2', '1.3', '1.9', '2.0.1', '2.3', '2.6', '3.0c1', '4.5'):
            self.assertTrue(v in r, (v, r))
        for v in ('1.2c1', '1.3.1', '1.5', '1.9.1', '2.0', '2.5', '3.0', '4.0'):
            self.assertTrue(v not in r, (v, r))

    def testOptionsAndHashing(self):
        r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
        r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
        r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
        self.assertEqual(r1, r2)
        self.assertEqual(r1, r3)
        self.assertEqual(r1.extras, ("foo", "bar"))
        self.assertEqual(r2.extras, ("bar", "foo"))  # extras are normalized
        self.assertEqual(hash(r1), hash(r2))
        self.assertEqual(
            hash(r1), hash(("twisted", ((">=", parse_version("1.2")),),
                            frozenset(["foo", "bar"])))
        )

    def testVersionEquality(self):
        r1 = Requirement.parse("foo==0.3a2")
        r2 = Requirement.parse("foo!=0.3a4")
        d = Distribution.from_filename
        self.assertTrue(d("foo-0.3a4.egg") not in r1)
        self.assertTrue(d("foo-0.3a1.egg") not in r1)
        self.assertTrue(d("foo-0.3a4.egg") not in r2)
        self.assertTrue(d("foo-0.3a2.egg") in r1)
        self.assertTrue(d("foo-0.3a2.egg") in r2)
        self.assertTrue(d("foo-0.3a3.egg") in r2)
        self.assertTrue(d("foo-0.3a5.egg") in r2)

    def testDistributeSetuptoolsOverride(self):
        # Plain setuptools or distribute mean we return distribute.
        self.assertEqual(
            Requirement.parse('setuptools').project_name, 'distribute')
        self.assertEqual(
            Requirement.parse('distribute').project_name, 'distribute')
        # setuptools lower than 0.7 means distribute
        self.assertEqual(
            Requirement.parse('setuptools==0.6c9').project_name, 'distribute')
        self.assertEqual(
            Requirement.parse('setuptools==0.6c10').project_name, 'distribute')
        self.assertEqual(
            Requirement.parse('setuptools>=0.6').project_name, 'distribute')
        self.assertEqual(
            Requirement.parse('setuptools < 0.7').project_name, 'distribute')
        # setuptools 0.7 and higher means setuptools.
        self.assertEqual(
            Requirement.parse('setuptools == 0.7').project_name, 'setuptools')
        self.assertEqual(
            Requirement.parse('setuptools == 0.7a1').project_name, 'setuptools')
        self.assertEqual(
            Requirement.parse('setuptools >= 0.7').project_name, 'setuptools')
class ParseTests(TestCase):
    """Low-level parsing helpers: yield_lines, split_sections,
    safe_name/safe_version, requirement strings, and the legacy
    parse_version equality/ordering rules."""

    def testEmptyParse(self):
        self.assertEqual(list(parse_requirements('')), [])

    def testYielding(self):
        # yield_lines flattens nested iterables and strips blank lines.
        for inp, out in [
            ([], []), ('x', ['x']), ([[]], []), (' x\n y', ['x', 'y']),
            (['x\n\n', 'y'], ['x', 'y']),
        ]:
            self.assertEqual(list(pkg_resources.yield_lines(inp)), out)

    def testSplitting(self):
        # Section headers are bracketed; leading text has section None;
        # '# foo' is a comment; '[b ]'/'[ d]' headers are stripped.
        self.assertEqual(
            list(
                pkg_resources.split_sections("""
                    x
                    [Y]
                    z
                    a
                    [b ]
                    # foo
                    c
                    [ d]
                    [q]
                    v
                    """
                )
            ),
            [(None, ["x"]), ("Y", ["z", "a"]), ("b", ["c"]), ("d", []), ("q", ["v"])]
        )
        # An unterminated section header is rejected.
        self.assertRaises(ValueError, list, pkg_resources.split_sections("[foo"))

    def testSafeName(self):
        self.assertEqual(safe_name("adns-python"), "adns-python")
        self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
        self.assertEqual(safe_name("WSGI Utils"), "WSGI-Utils")
        self.assertEqual(safe_name("Money$$$Maker"), "Money-Maker")
        self.assertNotEqual(safe_name("peak.web"), "peak-web")

    def testSafeVersion(self):
        self.assertEqual(safe_version("1.2-1"), "1.2-1")
        self.assertEqual(safe_version("1.2 alpha"), "1.2.alpha")
        self.assertEqual(safe_version("2.3.4 20050521"), "2.3.4.20050521")
        self.assertEqual(safe_version("Money$$$Maker"), "Money-Maker")
        self.assertEqual(safe_version("peak.web"), "peak.web")

    def testSimpleRequirements(self):
        self.assertEqual(
            list(parse_requirements('Twis-Ted>=1.2-1')),
            [Requirement('Twis-Ted', [('>=', '1.2-1')], ())]
        )
        # Line continuations and trailing comments are tolerated.
        self.assertEqual(
            list(parse_requirements('Twisted >=1.2, \ # more\n<2.0')),
            [Requirement('Twisted', [('>=', '1.2'), ('<', '2.0')], ())]
        )
        self.assertEqual(
            Requirement.parse("FooBar==1.99a3"),
            Requirement("FooBar", [('==', '1.99a3')], ())
        )
        # Malformed requirement strings are rejected.
        self.assertRaises(ValueError, Requirement.parse, ">=2.3")
        self.assertRaises(ValueError, Requirement.parse, "x\\")
        self.assertRaises(ValueError, Requirement.parse, "x==2 q")
        self.assertRaises(ValueError, Requirement.parse, "X==1\nY==2")
        self.assertRaises(ValueError, Requirement.parse, "#")

    def testVersionEquality(self):
        def c(s1, s2):
            # Both spellings must parse to the same version key.
            p1, p2 = parse_version(s1), parse_version(s2)
            self.assertEqual(p1, p2, (s1, s2, p1, p2))

        c('1.2-rc1', '1.2rc1')
        c('0.4', '0.4.0')
        c('0.4.0.0', '0.4.0')
        c('0.4.0-0', '0.4-0')
        c('0pl1', '0.0pl1')
        c('0pre1', '0.0c1')
        c('0.0.0preview1', '0c1')
        c('0.0c1', '0-rc1')
        c('1.2a1', '1.2.a.1'); c('1.2...a', '1.2a')

    def testVersionOrdering(self):
        def c(s1, s2):
            # s1 must sort strictly before s2.
            p1, p2 = parse_version(s1), parse_version(s2)
            self.assertTrue(p1 < p2, (s1, s2, p1, p2))

        c('2.1', '2.1.1')
        c('2a1', '2b0')
        c('2a1', '2.1')
        c('2.3a1', '2.3')
        c('2.1-1', '2.1-2')
        c('2.1-1', '2.1.1')
        c('2.1', '2.1pl4')
        c('2.1a0-20040501', '2.1')
        c('1.1', '02.1')
        c('A56', 'B27')
        c('3.2', '3.2.pl0')
        c('3.2-1', '3.2pl1')
        c('3.2pl1', '3.2pl1-1')
        c('0.4', '4.0')
        c('0.0.4', '0.4.0')
        c('0pl1', '0.4pl1')
        c('2.1.0-rc1', '2.1.0')
        c('2.1dev', '2.1a0')

        # Every later entry must sort before every earlier one.
        torture = """
        0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
        0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
        0.77.2-1 0.77.1-1 0.77.0-1
        """.split()

        for p, v1 in enumerate(torture):
            for v2 in torture[p+1:]:
                c(v2, v1)
class ScriptHeaderTests(TestCase):
    """Shebang-line rewriting for generated console scripts, including
    the Jython /usr/bin/env workaround."""
    # Non-ASCII path exercises the encoding-sensitive code paths.
    non_ascii_exe = '/Users/José/bin/python'

    def test_get_script_header(self):
        if not sys.platform.startswith('java') or not is_sh(sys.executable):
            # This test is for non-Jython platforms
            self.assertEqual(get_script_header('#!/usr/local/bin/python'),
                             '#!%s\n' % os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/bin/python -x'),
                             '#!%s -x\n' % os.path.normpath(sys.executable))
            self.assertEqual(get_script_header('#!/usr/bin/python',
                                               executable=self.non_ascii_exe),
                             '#!%s -x\n' % self.non_ascii_exe)

    def test_get_script_header_jython_workaround(self):
        # This test doesn't work with Python 3 in some locales
        if (sys.version_info >= (3,) and os.environ.get("LC_CTYPE")
                in (None, "C", "POSIX")):
            return

        # Stub out just enough of the java module for platform detection.
        class java:
            class lang:
                class System:
                    @staticmethod
                    def getProperty(property):
                        return ""
        sys.modules["java"] = java

        platform = sys.platform
        sys.platform = 'java1.5.0_13'
        stdout = sys.stdout
        try:
            # A mock sys.executable that uses a shebang line (this file)
            exe = os.path.normpath(os.path.splitext(__file__)[0] + '.py')
            self.assertEqual(
                get_script_header('#!/usr/local/bin/python', executable=exe),
                '#!/usr/bin/env %s\n' % exe)

            # Ensure we generate what is basically a broken shebang line
            # when there's options, with a warning emitted
            sys.stdout = sys.stderr = StringIO.StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python -x',
                                               executable=exe),
                             '#!%s  -x\n' % exe)
            self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
            sys.stdout = sys.stderr = StringIO.StringIO()
            self.assertEqual(get_script_header('#!/usr/bin/python',
                                               executable=self.non_ascii_exe),
                             '#!%s -x\n' % self.non_ascii_exe)
            self.assertTrue('Unable to adapt shebang line' in sys.stdout.getvalue())
        finally:
            # Restore globals mutated above so later tests are unaffected.
            del sys.modules["java"]
            sys.platform = platform
            sys.stdout = stdout
class NamespaceTests(TestCase):
    """Namespace-package declaration across multiple site directories.
    (Python 2 syntax: note the `except ImportError, e` form below.)"""

    def setUp(self):
        # Snapshot global pkg_resources/sys.path state; restored in tearDown.
        self._ns_pkgs = pkg_resources._namespace_packages.copy()
        self._tmpdir = tempfile.mkdtemp(prefix="tests-distribute-")
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
        self._prev_sys_path = sys.path[:]
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))

    def tearDown(self):
        shutil.rmtree(self._tmpdir)
        pkg_resources._namespace_packages = self._ns_pkgs.copy()
        sys.path = self._prev_sys_path[:]

    def _assertIn(self, member, container):
        """ assertIn and assertTrue does not exist in Python2.3"""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            # NOTE(review): `msg` is undefined here, so a failing
            # membership check raises NameError rather than failing
            # cleanly -- looks like a copy/paste slip from unittest.
            self.fail(self._formatMessage(msg, standardMsg))

    def test_two_levels_deep(self):
        """
        Test nested namespace packages
        Create namespace packages in the following tree :
            site-packages-1/pkg1/pkg2
            site-packages-2/pkg1/pkg2
        Check both are in the _namespace_packages dict and that their __path__
        is correct
        """
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
        ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
        for site in ["site-pkgs", "site-pkgs2"]:
            pkg1_init = open(os.path.join(self._tmpdir, site,
                                          "pkg1", "__init__.py"), "w")
            pkg1_init.write(ns_str)
            pkg1_init.close()
            pkg2_init = open(os.path.join(self._tmpdir, site,
                                          "pkg1", "pkg2", "__init__.py"), "w")
            pkg2_init.write(ns_str)
            pkg2_init.close()
        import pkg1
        self._assertIn("pkg1", pkg_resources._namespace_packages.keys())
        try:
            import pkg1.pkg2
        except ImportError, e:
            self.fail("Distribute tried to import the parent namespace package")
        # check the _namespace_packages dict
        self._assertIn("pkg1.pkg2", pkg_resources._namespace_packages.keys())
        self.assertEqual(pkg_resources._namespace_packages["pkg1"], ["pkg1.pkg2"])
        # check the __path__ attribute contains both paths
        self.assertEqual(pkg1.pkg2.__path__, [
            os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
            os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2")])
|
apache-2.0
|
vxsx/django-cms
|
cms/tests/test_plugins.py
|
1
|
80142
|
# -*- coding: utf-8 -*-
import base64
import datetime
import json
import os
from django import http
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.core import urlresolvers
from django.core.cache import cache
from django.core.exceptions import (
ValidationError, ImproperlyConfigured, ObjectDoesNotExist)
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
from django.utils import timezone
from cms import api
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered, DontUsePageAttributeWarning
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.sitemaps.cms_sitemap import CMSSitemap
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import (
Article, Section, ArticlePluginModel,
FKModel,
M2MTargetModel)
from cms.test_utils.project.pluginapp.plugins.meta.cms_plugins import (
TestPlugin, TestPlugin2, TestPlugin3, TestPlugin4, TestPlugin5)
from cms.test_utils.project.pluginapp.plugins.validation.cms_plugins import (
NonExisitngRenderTemplate, NoRender, NoRenderButChildren, DynTemplate)
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE,
URL_CMS_PLUGIN_REMOVE, URL_CMS_PAGE_PUBLISH, URL_CMS_PLUGIN_DELETE, URL_CMS_PLUGINS_COPY)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.i18n import force_language
from cms.utils.plugins import get_plugins_for_page, get_plugins
from djangocms_googlemap.models import GoogleMap
from djangocms_inherit.cms_plugins import InheritPagePlaceholderPlugin
from djangocms_file.models import File
from djangocms_inherit.models import InheritPagePlaceholder
from djangocms_link.forms import LinkForm
from djangocms_link.models import Link
from djangocms_picture.models import Picture
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import plugin_tags_to_id_list, plugin_to_tag
class DumbFixturePlugin(CMSPluginBase):
    # Minimal do-nothing plugin used as a fixture by the tests in this module.
    model = CMSPlugin
    name = "Dumb Test Plugin. It does nothing."
    render_template = ""
    admin_preview = False
    render_plugin = False

    def render(self, context, instance, placeholder):
        # Pass the template context through unchanged.
        return context
class DumbFixturePluginWithUrls(DumbFixturePlugin):
    # Fixture plugin exposing a custom admin URL, for URL-routing tests.
    name = DumbFixturePlugin.name + " With custom URLs."
    render_plugin = False

    def _test_view(self, request):
        return http.HttpResponse("It works")

    def get_plugin_urls(self):
        # One admin-wrapped test endpoint under the plugin's URL namespace.
        return [
            url(r'^testview/$', admin.site.admin_view(self._test_view), name='dumbfixtureplugin'),
        ]
# Register the URL-bearing fixture plugin so tests can address it by type.
plugin_pool.register_plugin(DumbFixturePluginWithUrls)
class PluginsTestBaseCase(CMSTestCase):
    """Shared fixture for plugin tests: creates a superuser and a staff
    'slave' user, and keeps the test client logged in as the superuser
    for the duration of each test."""

    def setUp(self):
        self.super_user = self._create_user("test", True, True)
        self.slave = self._create_user("slave", True)
        self.FIRST_LANG = settings.LANGUAGES[0][0]
        self.SECOND_LANG = settings.LANGUAGES[1][0]
        # Enter the login context manually; exited in tearDown.
        self._login_context = self.login_user_context(self.super_user)
        self._login_context.__enter__()

    def tearDown(self):
        self._login_context.__exit__(None, None, None)

    def approve_page(self, page):
        response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
        self.assertRedirects(response, URL_CMS_PAGE)
        # reload page
        return self.reload_page(page)

    def get_request(self, *args, **kwargs):
        # Augment the base request with placeholder media and a toolbar,
        # as the rendering pipeline expects.
        request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
        request.placeholder_media = Media()
        request.toolbar = CMSToolbar(request)
        return request

    def get_response_pk(self, response):
        # The add-plugin response embeds an /edit-plugin/<pk>/ URL;
        # extract and return the new plugin's pk.
        return int(response.content.decode('utf8').split("/edit-plugin/")[1].split("/")[0])
class PluginsTestCase(PluginsTestBaseCase):
    def _create_text_plugin_on_page(self, page):
        """POST the add-plugin endpoint to create a TextPlugin in the
        page's 'body' placeholder; return the new plugin's pk."""
        plugin_data = {
            'plugin_type': "TextPlugin",
            'plugin_language': settings.LANGUAGES[0][0],
            'placeholder_id': page.placeholders.get(slot="body").pk,
            'plugin_parent': '',
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEqual(CMSPlugin.objects.count(), 1)
        self.assertEqual(response.status_code, 200)
        created_plugin_id = self.get_response_pk(response)
        # Sanity check: the pk in the response matches the stored plugin.
        self.assertEqual(created_plugin_id, CMSPlugin.objects.all()[0].pk)
        return created_plugin_id
def _edit_text_plugin(self, plugin_id, text):
    """Open the edit view for *plugin_id*, save *text* as its body, and
    return the refreshed Text instance."""
    edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
    get_response = self.client.get(edit_url)
    self.assertEqual(get_response.status_code, 200)
    post_response = self.client.post(edit_url, {"body": text})
    self.assertEqual(post_response.status_code, 200)
    return Text.objects.get(pk=plugin_id)
def test_add_edit_plugin(self):
    """
    Add a text plugin, edit its body, then check that submitting the edit
    form with the cancel button leaves the saved body untouched.
    """
    # add a new text plugin
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    created_plugin_id = self._create_text_plugin_on_page(page)
    # now edit the plugin
    txt = self._edit_text_plugin(created_plugin_id, "Hello World")
    self.assertEqual("Hello World", txt.body)
    # edit body, but click cancel button
    data = {
        "body": "Hello World!!",
        "_cancel": True,
    }
    edit_url = '%s%d/' % (URL_CMS_PLUGIN_EDIT, created_plugin_id)
    response = self.client.post(edit_url, data)
    self.assertEqual(response.status_code, 200)
    # the cancelled edit must not have been persisted
    txt = Text.objects.all()[0]
    self.assertEqual("Hello World", txt.body)
def test_plugin_edit_marks_page_dirty(self):
    """Publishing clears the page's dirty flag; editing a plugin sets it again."""
    page_data = self.get_new_page_data()
    response = self.client.post(URL_CMS_PAGE_ADD, page_data)
    self.assertEqual(response.status_code, 302)
    page = Page.objects.all()[0]
    response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
    self.assertEqual(response.status_code, 302)
    # adding a plugin dirties the draft
    created_plugin_id = self._create_text_plugin_on_page(page)
    page = Page.objects.all()[0]
    self.assertEqual(page.is_dirty('en'), True)
    # publishing cleans it again
    response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
    self.assertEqual(response.status_code, 302)
    page = Page.objects.all()[0]
    self.assertEqual(page.is_dirty('en'), False)
    # editing an existing plugin must mark the page dirty once more
    self._edit_text_plugin(created_plugin_id, "Hello World")
    page = Page.objects.all()[0]
    self.assertEqual(page.is_dirty('en'), True)
def test_plugin_order(self):
    """
    Test that plugin position is saved after creation
    """
    page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
                              slug="page1", published=True, in_navigation=True)
    ph_en = page_en.placeholders.get(slot="col_left")
    # We check created objects and objects from the DB to be sure the position value
    # has been saved correctly
    text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
    text_plugin_2 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
    db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk)
    db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk)
    with self.settings(CMS_PERMISSION=False):
        # positions are zero-based and follow creation order
        self.assertEqual(text_plugin_1.position, 0)
        self.assertEqual(db_plugin_1.position, 0)
        self.assertEqual(text_plugin_2.position, 1)
        self.assertEqual(db_plugin_2.position, 1)
        ## Finally we render the placeholder to test the actual content
        rendered_placeholder = ph_en.render(self.get_context(page_en.get_absolute_url(), page=page_en), None)
        self.assertEqual(rendered_placeholder, "I'm the firstI'm the second")
def test_plugin_order_alt(self):
    """
    Test that plugin position is saved after creation
    """
    draft_page = api.create_page("PluginOrderPage", "col_two.html", "en",
                                 slug="page1", published=False, in_navigation=True)
    placeholder = draft_page.placeholders.get(slot="col_left")
    # We check created objects and objects from the DB to be sure the position value
    # has been saved correctly
    text_plugin_2 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the second")
    text_plugin_3 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the third")
    # Publish to create a 'live' version
    draft_page.publish('en')
    draft_page = draft_page.reload()
    placeholder = draft_page.placeholders.get(slot="col_left")
    # Add a plugin and move it to the first position
    text_plugin_1 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the first")
    data = {
        'placeholder_id': placeholder.id,
        'plugin_id': text_plugin_1.id,
        'plugin_parent': '',
        'plugin_language': 'en',
        'plugin_order[]': [text_plugin_1.id, text_plugin_2.id, text_plugin_3.id],
    }
    self.client.post(URL_CMS_PLUGIN_MOVE, data)
    draft_page.publish('en')
    draft_page = draft_page.reload()
    live_page = draft_page.get_public_object()
    placeholder = draft_page.placeholders.get(slot="col_left")
    live_placeholder = live_page.placeholders.get(slot="col_left")
    with self.settings(CMS_PERMISSION=False):
        # after the move, the requested order must be persisted
        self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_1.pk).position, 0)
        self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_2.pk).position, 1)
        self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_3.pk).position, 2)
        ## Finally we render the placeholder to test the actual content
        rendered_placeholder = placeholder.render(self.get_context(draft_page.get_absolute_url(), page=draft_page), None)
        self.assertEqual(rendered_placeholder, "I'm the firstI'm the secondI'm the third")
        # the published (live) page must show the same order
        rendered_live_placeholder = live_placeholder.render(self.get_context(live_page.get_absolute_url(), page=live_page), None)
        self.assertEqual(rendered_live_placeholder, "I'm the firstI'm the secondI'm the third")
def test_plugin_breadcrumbs(self):
    """
    Test the plugin breadcrumbs order

    Builds a MultiColumn > Column > Text nesting and checks that each
    plugin's breadcrumb grows by one level and carries the right titles
    and edit URLs.
    """
    draft_page = api.create_page("home", "col_two.html", "en",
                                 slug="page1", published=False, in_navigation=True)
    placeholder = draft_page.placeholders.get(slot="col_left")
    columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
    column = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')
    text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column, body="I'm the second")
    text_breadcrumbs = text_plugin.get_breadcrumb()
    self.assertEqual(len(columns.get_breadcrumb()), 1)
    self.assertEqual(len(column.get_breadcrumb()), 2)
    self.assertEqual(len(text_breadcrumbs), 3)
    # BUG FIX: the original used assertTrue(a, b) here, which treats *b* as
    # the failure message and passes for any truthy *a* — the intended
    # comparisons could never fail. Use real equality/membership asserts.
    self.assertEqual(text_breadcrumbs[0]['title'], columns.get_plugin_class().name)
    self.assertEqual(text_breadcrumbs[1]['title'], column.get_plugin_class().name)
    self.assertEqual(text_breadcrumbs[2]['title'], text_plugin.get_plugin_class().name)
    self.assertIn('/edit-plugin/%s/' % columns.pk, text_breadcrumbs[0]['url'])
    self.assertIn('/edit-plugin/%s/' % column.pk, text_breadcrumbs[1]['url'])
    self.assertIn('/edit-plugin/%s/' % text_plugin.pk, text_breadcrumbs[2]['url'])
def test_add_cancel_plugin(self):
    """
    Test that you can cancel a new plugin before editing and
    that the plugin is removed.
    """
    # add a new text plugin
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    pk = CMSPlugin.objects.all()[0].pk
    # the add view answers with a JSON payload describing the edit URL,
    # breadcrumb and delete URL of the freshly created plugin
    expected = {
        "url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
        "breadcrumb": [
            {
                "url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
                "title": "Text"
            }
        ],
        'delete': URL_CMS_PLUGIN_DELETE % pk
    }
    output = json.loads(response.content.decode('utf8'))
    self.assertEqual(output, expected)
    # now click cancel instead of editing
    response = self.client.get(output['url'])
    self.assertEqual(response.status_code, 200)
    data = {
        "body": "Hello World",
        "_cancel": True,
    }
    response = self.client.post(output['url'], data)
    self.assertEqual(response.status_code, 200)
    # cancelling the very first edit must delete the never-saved plugin
    self.assertEqual(0, Text.objects.count())
def test_extract_images_from_text(self):
    """Saving a text body containing a base64 data-URI image extracts the
    image into a separate child plugin referenced by ``plugin_obj_<pk>``.

    NOTE(review): the expected pk is assumed to be ``txt.pk + 1`` because
    the extracted plugin is created right after the text plugin — confirm
    against the picture-extraction implementation.
    """
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    # now edit the plugin
    edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
    response = self.client.get(edit_url)
    self.assertEqual(response.status_code, 200)
    # inline a real JPEG as a base64 data URI inside the body
    img_path = os.path.join(os.path.dirname(__file__), 'data', 'image.jpg')
    with open(img_path, 'rb') as fobj:
        img_data = base64.b64encode(fobj.read()).decode('utf-8')
    body = """<p>
    <img alt='' src='data:image/jpeg;base64,{data}' />
</p>""".format(data=img_data)
    data = {
        "body": body
    }
    response = self.client.post(edit_url, data)
    self.assertEqual(response.status_code, 200)
    txt = Text.objects.all()[0]
    self.assertTrue('id="plugin_obj_%s"' % (txt.pk + 1) in txt.body)
def test_add_text_plugin_empty_tag(self):
    """
    Empty HTML tags (e.g. an empty styled div) in a text plugin body must
    survive saving unchanged — they must not be stripped by cleanup.
    """
    # add a new text plugin
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    # now edit the plugin
    edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
    response = self.client.get(edit_url)
    self.assertEqual(response.status_code, 200)
    data = {
        "body": '<div class="someclass"></div><p>foo</p>'
    }
    response = self.client.post(edit_url, data)
    self.assertEqual(response.status_code, 200)
    txt = Text.objects.all()[0]
    self.assertEqual('<div class="someclass"></div><p>foo</p>', txt.body)
def test_add_text_plugin_html_sanitizer(self):
    """
    Saving a text plugin body containing a <script> tag keeps the tag as-is.

    NOTE(review): the assertion expects the script tag to be *preserved*,
    implying HTML sanitization is disabled in the test settings — confirm
    against the project's text-plugin sanitizer configuration.
    """
    # add a new text plugin
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    # now edit the plugin
    edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
    response = self.client.get(edit_url)
    self.assertEqual(response.status_code, 200)
    data = {
        "body": '<script>var bar="hacked"</script>'
    }
    response = self.client.post(edit_url, data)
    self.assertEqual(response.status_code, 200)
    txt = Text.objects.all()[0]
    self.assertEqual('<script>var bar="hacked"</script>', txt.body)
def test_copy_plugins_method(self):
    """
    Test that CMSPlugin copy does not have side effects
    """
    # create some objects
    page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
    page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
    ph_en = page_en.placeholders.get(slot="body")
    ph_de = page_de.placeholders.get(slot="body")
    # add the text plugin
    text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
    self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
    # add a *nested* link plugin
    link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
                                    name="A Link", url="https://www.django-cms.org")
    # embed a reference to the nested link plugin into the text body
    text_plugin_en.body += plugin_to_tag(link_plugin_en)
    text_plugin_en.save()
    # the call above to add a child makes a plugin reload required here.
    text_plugin_en = self.reload(text_plugin_en)
    # setup the plugins to copy
    plugins = [text_plugin_en, link_plugin_en]
    # save the old ids for check
    old_ids = [plugin.pk for plugin in plugins]
    new_plugins = []
    plugins_ziplist = []
    old_parent_cache = {}
    # This is a stripped down version of cms.copy_plugins.copy_plugins_to
    # to low-level testing the copy process
    for plugin in plugins:
        new_plugins.append(plugin.copy_plugin(ph_de, 'de', old_parent_cache))
        plugins_ziplist.append((new_plugins[-1], plugin))
    for idx, plugin in enumerate(plugins):
        inst, _ = new_plugins[idx].get_plugin_instance()
        new_plugins[idx] = inst
        new_plugins[idx].post_copy(plugin, plugins_ziplist)
    for idx, plugin in enumerate(plugins):
        # original plugin instance reference should stay unmodified
        self.assertEqual(old_ids[idx], plugin.pk)
        # new plugin instance should be different from the original
        self.assertNotEqual(new_plugins[idx], plugin.pk)
        # text plugins (both old and new) should contain a reference
        # to the link plugins
        if plugin.plugin_type == 'TextPlugin':
            self.assertTrue('link.png' in plugin.body)
            self.assertTrue('plugin_obj_%s' % plugin.get_children()[0].pk in plugin.body)
            self.assertTrue('link.png' in new_plugins[idx].body)
            self.assertTrue('plugin_obj_%s' % new_plugins[idx].get_children()[0].pk in new_plugins[idx].body)
def test_plugin_position(self):
    """Positions restart from 0 per parent/placeholder and extend sibling
    runs when copying into an existing branch."""
    page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
    placeholder = page_en.placeholders.get(slot="body")  # ID 2
    placeholder_right = page_en.placeholders.get(slot="right-column")
    columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")  # ID 1
    column_1 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')  # ID 2
    column_2 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='30%')  # ID 3
    first_text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the first")  # ID 4
    text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the second")  # ID 5
    returned_1 = copy_plugins_to([text_plugin], placeholder, 'en', column_1.pk)  # ID 6
    returned_2 = copy_plugins_to([text_plugin], placeholder_right, 'en')  # ID 7
    returned_3 = copy_plugins_to([text_plugin], placeholder, 'en', column_2.pk)  # ID 8
    # STATE AT THIS POINT:
    # placeholder
    #     - columns
    #         - column_1
    #             - text_plugin "I'm the first" created here
    #             - text_plugin "I'm the second" created here
    #             - text_plugin "I'm the second" (returned_1) copied here
    #         - column_2
    #             - text_plugin "I'm the second" (returned_3) copied here
    # placeholder_right
    #     - text_plugin "I'm the second" (returned_2) copied here
    # First plugin in the plugin branch
    self.assertEqual(first_text_plugin.position, 0)
    # Second plugin in the plugin branch
    self.assertEqual(text_plugin.position, 1)
    # Added as third plugin in the same branch as the above
    self.assertEqual(returned_1[0][0].position, 2)
    # First plugin in a placeholder
    self.assertEqual(returned_2[0][0].position, 0)
    # First plugin nested in a plugin
    self.assertEqual(returned_3[0][0].position, 0)
def test_copy_plugins(self):
    """
    Test that copying plugins works as expected.
    """
    # create some objects
    page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
    page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
    ph_en = page_en.placeholders.get(slot="body")
    ph_de = page_de.placeholders.get(slot="body")
    # add the text plugin
    text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
    self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
    # add a *nested* link plugin
    link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
                                    name="A Link", url="https://www.django-cms.org")
    # the call above to add a child makes a plugin reload required here.
    text_plugin_en = self.reload(text_plugin_en)
    # check the relations
    self.assertEqual(text_plugin_en.get_children().count(), 1)
    self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
    # just sanity check that so far everything went well
    self.assertEqual(CMSPlugin.objects.count(), 2)
    # copy the plugins to the german placeholder
    copy_plugins_to(ph_en.get_plugins(), ph_de, 'de')
    self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
    text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
    self.assertEqual(text_plugin_de.get_children().count(), 1)
    link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
    # check we have twice as many plugins as before
    self.assertEqual(CMSPlugin.objects.count(), 4)
    # check language plugins
    self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
    self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
    text_plugin_en = self.reload(text_plugin_en)
    link_plugin_en = self.reload(link_plugin_en)
    # check the relations in english didn't change
    self.assertEqual(text_plugin_en.get_children().count(), 1)
    self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
    # the copies must carry identical content
    self.assertEqual(link_plugin_de.name, link_plugin_en.name)
    self.assertEqual(link_plugin_de.url, link_plugin_en.url)
    self.assertEqual(text_plugin_de.body, text_plugin_en.body)
    # test subplugin copy
    copy_plugins_to([link_plugin_en], ph_de, 'de')
def test_deep_copy_plugins(self):
    """Copying a nested plugin tree under an existing target plugin keeps
    the tree shape (child counts) intact."""
    page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
    ph_en = page_en.placeholders.get(slot="body")
    # Grid wrapper 1
    mcol1_en = api.add_plugin(ph_en, "MultiColumnPlugin", "en", position="first-child")
    # Grid column 1.1
    col1_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
    # Grid column 1.2
    col2_en = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1_en)
    # add a *nested* link plugin
    link_plugin_en = api.add_plugin(
        ph_en,
        "LinkPlugin",
        "en",
        target=col2_en,
        name="A Link",
        url="https://www.django-cms.org"
    )
    old_plugins = [mcol1_en, col1_en, col2_en, link_plugin_en]
    page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
    ph_de = page_de.placeholders.get(slot="body")
    # Grid wrapper 1
    mcol1_de = api.add_plugin(ph_de, "MultiColumnPlugin", "de", position="first-child")
    # Grid column 1.1
    col1_de = api.add_plugin(ph_de, "ColumnPlugin", "de", position="first-child", target=mcol1_de)
    # copy the whole english tree *underneath* the german column
    copy_plugins_to(
        old_plugins=[mcol1_en, col1_en, col2_en, link_plugin_en],
        to_placeholder=ph_de,
        to_language='de',
        parent_plugin_id=col1_de.pk,
    )
    col1_de = self.reload(col1_de)
    new_plugins = col1_de.get_descendants().order_by('path')
    self.assertEqual(new_plugins.count(), len(old_plugins))
    # tree shape must match pairwise (same child counts)
    for old_plugin, new_plugin in zip(old_plugins, new_plugins):
        self.assertEqual(old_plugin.numchild, new_plugin.numchild)
    # publishing the copied structure must stay within a sane query budget
    with self.assertNumQueries(FuzzyInt(0, 207)):
        page_en.publish('en')
def test_plugin_validation(self):
    """Plugins with missing or unrenderable templates fail template
    validation; a plugin with a dynamic template passes."""
    for broken_plugin in (NonExisitngRenderTemplate, NoRender, NoRenderButChildren):
        with self.assertRaises(ImproperlyConfigured):
            plugin_pool.validate_templates(broken_plugin)
    # a dynamically-templated plugin must validate without raising
    plugin_pool.validate_templates(DynTemplate)
def test_remove_plugin_before_published(self):
    """
    When removing a draft plugin we would expect the public copy of the plugin to also be removed
    """
    # add a page
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    # add a plugin
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
    # there should be only 1 plugin
    self.assertEqual(CMSPlugin.objects.all().count(), 1)
    # delete the plugin
    plugin_data = {
        'plugin_id': self.get_response_pk(response)
    }
    remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % self.get_response_pk(response)
    response = self.client.post(remove_url, plugin_data)
    self.assertEqual(response.status_code, 302)
    # there should be no plugins
    self.assertEqual(0, CMSPlugin.objects.all().count())
def test_remove_plugin_after_published(self):
    """Deleting a draft plugin after publish removes only the draft copy;
    the public copy survives until the page is re-published."""
    # add a page
    api.create_page("home", "nav_playground.html", "en")
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    # add a plugin
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    plugin_id = self.get_response_pk(response)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
    # there should be only 1 plugin
    self.assertEqual(CMSPlugin.objects.all().count(), 1)
    self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 1)
    # publish page
    response = self.client.post(URL_CMS_PAGE + "%d/en/publish/" % page.pk, {1: 1})
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Page.objects.count(), 3)
    # there should now be two plugins - 1 draft, 1 public
    self.assertEqual(CMSPlugin.objects.all().count(), 2)
    # delete the plugin
    plugin_data = {
        'plugin_id': plugin_id
    }
    remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin_id
    response = self.client.post(remove_url, plugin_data)
    self.assertEqual(response.status_code, 302)
    # there should be no plugins
    self.assertEqual(CMSPlugin.objects.all().count(), 1)
    self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=False).count(), 1)
def test_remove_plugin_not_associated_to_page(self):
    """
    Test case for PlaceholderField

    Adding a plugin to a free-standing placeholder (not attached to any
    page) through the page-plugin add view must be rejected with a 404.
    """
    page_data = self.get_new_page_data()
    self.client.post(URL_CMS_PAGE_ADD, page_data)
    page = Page.objects.all()[0]
    # add a plugin
    plugin_data = {
        'plugin_type': "TextPlugin",
        'plugin_language': settings.LANGUAGES[0][0],
        'placeholder_id': page.placeholders.get(slot="body").pk,
        'plugin_parent': '',
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
    # there should be only 1 plugin
    self.assertEqual(CMSPlugin.objects.all().count(), 1)
    # a placeholder with no page attached
    ph = Placeholder(slot="subplugin")
    ph.save()
    plugin_data = {
        'plugin_type': "TextPlugin",
        'language': settings.LANGUAGES[0][0],
        'placeholder': ph.pk,
        'parent': self.get_response_pk(response)
    }
    response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    # no longer allowed for security reasons
    self.assertEqual(response.status_code, 404)
def test_register_plugin_twice_should_raise(self):
    """Registering the same plugin class twice raises PluginAlreadyRegistered
    and leaves the plugin pool unchanged afterwards."""
    number_of_plugins_before = len(plugin_pool.get_all_plugins())
    # The first time we register the plugin it should work
    plugin_pool.register_plugin(DumbFixturePlugin)
    try:
        # Registering a second time must raise. assertRaises replaces the
        # original manual raised-flag bookkeeping.
        with self.assertRaises(PluginAlreadyRegistered):
            plugin_pool.register_plugin(DumbFixturePlugin)
    finally:
        # Always unregister, even if the assertion above fails, so the
        # fixture plugin cannot leak into other tests.
        plugin_pool.unregister_plugin(DumbFixturePlugin)
    # Let's make sure we have the same number of plugins as before:
    number_of_plugins_after = len(plugin_pool.get_all_plugins())
    self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_unregister_non_existing_plugin_should_raise(self):
    """Unregistering a plugin that was never registered raises
    PluginNotRegistered and does not shrink the plugin pool."""
    number_of_plugins_before = len(plugin_pool.get_all_plugins())
    # There should not be such a plugin registered if the other tests
    # don't leak plugins. assertRaises replaces the original manual
    # raised-flag bookkeeping.
    with self.assertRaises(PluginNotRegistered):
        plugin_pool.unregister_plugin(DumbFixturePlugin)
    # Let's count, to make sure we didn't remove a plugin accidentally.
    number_of_plugins_after = len(plugin_pool.get_all_plugins())
    self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_inheritplugin_media(self):
    """
    Test case for InheritPagePlaceholder

    A page that inherits a placeholder containing a GoogleMap plugin must
    include the Google Maps JS in its rendered output.
    """
    inheritfrompage = api.create_page('page to inherit from',
                                      'nav_playground.html',
                                      'en')
    body = inheritfrompage.placeholders.get(slot="body")
    plugin = GoogleMap(
        plugin_type='GoogleMapPlugin',
        placeholder=body,
        position=1,
        language=settings.LANGUAGE_CODE,
        address="Riedtlistrasse 16",
        zipcode="8006",
        city="Zurich",
    )
    plugin.add_root(instance=plugin)
    inheritfrompage.publish('en')
    page = api.create_page('inherit from page',
                           'nav_playground.html',
                           'en',
                           published=True)
    inherited_body = page.placeholders.get(slot="body")
    inherit_plugin = InheritPagePlaceholder(
        plugin_type='InheritPagePlaceholderPlugin',
        placeholder=inherited_body,
        position=1,
        language=settings.LANGUAGE_CODE,
        from_page=inheritfrompage,
        from_language=settings.LANGUAGE_CODE)
    inherit_plugin.add_root(instance=inherit_plugin)
    page.publish('en')
    # render anonymously and with a cold cache to exercise the public path
    self.client.logout()
    cache.clear()
    response = self.client.get(page.get_absolute_url())
    # un-escape &amp; so the URL check works regardless of HTML escaping
    self.assertTrue(
        'https://maps-api-ssl.google.com/maps/api/js' in response.content.decode('utf8').replace("&amp;", "&"))
def test_inherit_plugin_with_empty_plugin(self):
    """Rendering an inherited placeholder must not fail when the source
    placeholder contains a CMSPlugin with no bound instance."""
    inheritfrompage = api.create_page('page to inherit from',
                                      'nav_playground.html',
                                      'en', published=True)
    body = inheritfrompage.placeholders.get(slot="body")
    empty_plugin = CMSPlugin(
        plugin_type='TextPlugin', # create an empty plugin
        placeholder=body,
        position=1,
        language='en',
    )
    empty_plugin.add_root(instance=empty_plugin)
    other_page = api.create_page('other page', 'nav_playground.html', 'en', published=True)
    inherited_body = other_page.placeholders.get(slot="body")
    api.add_plugin(inherited_body, InheritPagePlaceholderPlugin, 'en', position='last-child',
                   from_page=inheritfrompage, from_language='en')
    api.add_plugin(inherited_body, "TextPlugin", "en", body="foobar")
    # this should not fail, even if there in an empty plugin
    rendered = inherited_body.render(context=self.get_context(other_page.get_absolute_url(), page=other_page), width=200)
    self.assertIn("foobar", rendered)
def test_render_textplugin(self):
    """Rendering a text plugin referencing ten child link plugins resolves
    every reference and stays within a fixed query budget."""
    # Setup
    page = api.create_page("render test", "nav_playground.html", "en")
    ph = page.placeholders.get(slot="body")
    text_plugin = api.add_plugin(ph, "TextPlugin", "en", body="Hello World")
    link_plugins = []
    for i in range(0, 10):
        link_plugins.append(api.add_plugin(ph, "LinkPlugin", "en",
                                           target=text_plugin,
                                           name="A Link %d" % i,
                                           url="http://django-cms.org"))
        text_plugin.body += '<img src="/static/cms/img/icons/plugins/link.png" alt="Link - %s" id="plugin_obj_%d" title="Link - %s" />' % (
            link_plugins[-1].name,
            link_plugins[-1].pk,
            link_plugins[-1].name,
        )
    text_plugin.save()
    ph = Placeholder.objects.get(pk=ph.pk)
    # overwrite the body with bare references to every child link plugin;
    # rendering must expand each plugin_obj_<pk> placeholder
    text_plugin.body = '\n'.join(['<img id="plugin_obj_%d" src=""/>' % l.cmsplugin_ptr_id for l in link_plugins])
    text_plugin.save()
    text_plugin = self.reload(text_plugin)
    # all ten children must render within two queries
    with self.assertNumQueries(2):
        rendered = text_plugin.render_plugin(placeholder=ph)
    for i in range(0, 10):
        self.assertTrue('A Link %d' % i in rendered)
def test_copy_textplugin(self):
    """
    Test that copying of textplugins replaces references to copied plugins
    """
    page = api.create_page("page", "nav_playground.html", "en")
    placeholder = page.placeholders.get(slot='body')
    plugin_base = CMSPlugin(
        plugin_type='TextPlugin',
        placeholder=placeholder,
        position=0,
        language=self.FIRST_LANG)
    plugin_base = plugin_base.add_root(instance=plugin_base)
    plugin = Text(body='')
    plugin_base.set_base_attr(plugin)
    plugin.save()
    # first child: a bare (instance-less) plugin
    plugin_ref_1_base = CMSPlugin(
        plugin_type='EmptyPlugin',
        placeholder=placeholder,
        position=0,
        language=self.FIRST_LANG)
    plugin_ref_1_base = plugin_base.add_child(instance=plugin_ref_1_base)
    # second child: a nested text plugin
    plugin_ref_2_base = CMSPlugin(
        plugin_type='TextPlugin',
        placeholder=placeholder,
        position=1,
        language=self.FIRST_LANG)
    plugin_ref_2_base = plugin_base.add_child(instance=plugin_ref_2_base)
    plugin_ref_2 = Text(body='')
    plugin_ref_2_base.set_base_attr(plugin_ref_2)
    plugin_ref_2.save()
    # the parent's body references both children by pk
    plugin.body = ' <img id="plugin_obj_%s" src=""/><img id="plugin_obj_%s" src=""/>' % (
        str(plugin_ref_1_base.pk), str(plugin_ref_2.pk))
    plugin.save()
    page_data = self.get_new_page_data()
    #create 2nd language page
    page_data.update({
        'language': self.SECOND_LANG,
        'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
    })
    response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
    self.assertRedirects(response, URL_CMS_PAGE)
    self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
    self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
    self.assertEqual(CMSPlugin.objects.count(), 3)
    self.assertEqual(Page.objects.all().count(), 1)
    # copy all plugins from the first language into the second
    copy_data = {
        'source_placeholder_id': placeholder.pk,
        'target_placeholder_id': placeholder.pk,
        'target_language': self.SECOND_LANG,
        'source_language': self.FIRST_LANG,
    }
    response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.content.decode('utf8').count('"position":'), 3)
    # assert copy success
    self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
    self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)
    self.assertEqual(CMSPlugin.objects.count(), 6)
    # the copied text body must reference the *new* child pks, not the old ones
    plugins = list(CMSPlugin.objects.all())
    new_plugin = plugins[3].get_plugin_instance()[0]
    idlist = sorted(plugin_tags_to_id_list(new_plugin.body))
    expected = sorted([plugins[4].pk, plugins[5].pk])
    self.assertEqual(idlist, expected)
def test_search_pages(self):
    """
    Test search for pages
    To be fully useful, this testcase needs to have the following different
    Plugin configurations within the project:
        * unaltered cmsplugin_ptr
        * cmsplugin_ptr with related_name='+'
        * cmsplugin_ptr with related_query_name='+'
        * cmsplugin_ptr with related_query_name='whatever_foo'
        * cmsplugin_ptr with related_name='whatever_bar'
        * cmsplugin_ptr with related_query_name='whatever_foo' and related_name='whatever_bar'
    Those plugins are in cms/test_utils/project/pluginapp/revdesc/models.py
    """
    page = api.create_page("page", "nav_playground.html", "en")
    placeholder = page.placeholders.get(slot='body')
    text = Text(body="hello", language="en", placeholder=placeholder, plugin_type="TextPlugin", position=1)
    text.save()
    page.publish('en')
    # only the matching term finds the page
    self.assertEqual(Page.objects.search("hi").count(), 0)
    self.assertEqual(Page.objects.search("hello").count(), 1)
def test_empty_plugin_is_not_ignored(self):
    """Rendering a placeholder containing an instance-less plugin produces
    empty output but still populates the placeholder's plugin cache."""
    page = api.create_page("page", "nav_playground.html", "en")
    placeholder = page.placeholders.get(slot='body')
    plugin = CMSPlugin(
        plugin_type='TextPlugin',
        placeholder=placeholder,
        position=1,
        language=self.FIRST_LANG)
    plugin.add_root(instance=plugin)
    # this should not raise any errors, but just ignore the empty plugin
    out = placeholder.render(self.get_context(), width=300)
    self.assertFalse(len(out))
    # the plugin was still seen: the cache is non-empty
    self.assertTrue(len(placeholder._plugins_cache))
def test_defer_pickel(self):
    """A Text plugin fetched with ``defer()`` must be picklable.

    (The method name keeps the historical "pickel" typo so the test id
    stays stable.)  The original test only verified that ``pickle.dump``
    did not raise; also verify the instance round-trips with its loaded
    data intact.
    """
    page = api.create_page("page", "nav_playground.html", "en")
    placeholder = page.placeholders.get(slot='body')
    api.add_plugin(placeholder, "TextPlugin", 'en', body="Hello World")
    plugins = Text.objects.all().defer('path')
    import pickle
    import io
    a = io.BytesIO()
    pickle.dump(plugins[0], a)
    # round-trip: the non-deferred field must survive unpickling
    restored = pickle.loads(a.getvalue())
    self.assertEqual(restored.body, "Hello World")
def test_empty_plugin_description(self):
    """A CMSPlugin without a bound instance describes itself as "<Empty>"."""
    page = api.create_page("page", "nav_playground.html", "en")
    body = page.placeholders.get(slot='body')
    bare_plugin = CMSPlugin(
        plugin_type='TextPlugin',
        placeholder=body,
        position=1,
        language=self.FIRST_LANG,
    )
    self.assertEqual("<Empty>", bare_plugin.get_short_description())
def test_page_attribute_warns(self):
    """Accessing ``CMSPlugin.page`` emits DontUsePageAttributeWarning."""
    page = api.create_page("page", "nav_playground.html", "en")
    placeholder = page.placeholders.get(slot='body')
    a = CMSPlugin(
        plugin_type='TextPlugin',
        placeholder=placeholder,
        position=1,
        language=self.FIRST_LANG
    )
    a.save()

    def get_page(plugin): # pylint: disable=unused-argument
        # deliberate attribute access that should trigger the warning
        return plugin.page

    self.assertWarns(
        DontUsePageAttributeWarning,
        "Don't use the page attribute on CMSPlugins! CMSPlugins are not guaranteed to have a page associated with them!",
        get_page, a
    )
def test_set_translatable_content(self):
    """``set_translatable_content`` reports success for Text and Link plugins."""
    text_plugin = Text(body="hello")
    link_plugin = Link(name="hello")
    self.assertTrue(text_plugin.set_translatable_content({'body': 'world'}))
    self.assertTrue(link_plugin.set_translatable_content({'name': 'world'}))
def test_editing_plugin_changes_page_modification_time_in_sitemap(self):
    """Editing a plugin bumps the sitemap lastmod of its page to today."""
    now = timezone.now()
    one_day_ago = now - datetime.timedelta(days=1)
    page = api.create_page("page", "nav_playground.html", "en", published=True)
    title = page.get_title_obj('en')
    # backdate the page so only the plugin edit can account for "today"
    page.creation_date = one_day_ago
    page.changed_date = one_day_ago
    plugin_id = self._create_text_plugin_on_page(page)
    plugin = self._edit_text_plugin(plugin_id, "fnord")
    actual_last_modification_time = CMSSitemap().lastmod(title)
    # drop sub-second precision before comparing dates
    actual_last_modification_time -= datetime.timedelta(microseconds=actual_last_modification_time.microsecond)
    self.assertEqual(plugin.changed_date.date(), actual_last_modification_time.date())
def test_moving_plugin_to_different_placeholder(self):
"""A nested plugin can be moved into another placeholder via the admin view."""
plugin_pool.register_plugin(DumbFixturePlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
plugin_data = {
'plugin_type': 'DumbFixturePlugin',
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot='body').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# Add a second plugin as a child of the first one.
plugin_data['plugin_parent'] = self.get_response_pk(response)
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# Move the child plugin into the right-column placeholder.
post = {
'plugin_id': self.get_response_pk(response),
'placeholder_id': page.placeholders.get(slot='right-column').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
from cms.utils.plugins import build_plugin_tree
# Rebuilding the tree must not raise after the move.
build_plugin_tree(page.placeholders.get(slot='right-column').get_plugins_list())
plugin_pool.unregister_plugin(DumbFixturePlugin)
def test_get_plugins_for_page(self):
"""get_plugins_for_page() only returns plugins whose placeholder is in the template."""
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm inside an existing placeholder.")
# This placeholder is not in the template.
ph_en_not_used = page_en.placeholders.create(slot="not_used")
text_plugin_2 = api.add_plugin(ph_en_not_used, "TextPlugin", "en", body="I'm inside a non-existent placeholder.")
page_plugins = get_plugins_for_page(None, page_en, page_en.get_title_obj_attribute('language'))
db_text_plugin_1 = page_plugins.get(pk=text_plugin_1.pk)
# The plugin in the orphaned placeholder must be excluded.
self.assertRaises(CMSPlugin.DoesNotExist, page_plugins.get, pk=text_plugin_2.pk)
self.assertEqual(db_text_plugin_1.pk, text_plugin_1.pk)
def test_plugin_move_with_reload(self):
"""The move endpoint's JSON must honour each plugin's 'requires_reload' move option."""
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
# Build two throwaway plugin classes differing only in action_options.
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
plugin_pool.register_plugin(ReloadDrivenPlugin)
plugin_pool.register_plugin(NonReloadDrivenPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
plugin_1 = api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
with force_language('en'):
plugin_1_action_urls = plugin_1.get_action_urls()
reload_expected = {
'reload': True,
'urls': plugin_1_action_urls,
}
# Test Plugin reload == True on Move
post = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), reload_expected)
with force_language('en'):
plugin_2_action_urls = plugin_2.get_action_urls()
no_reload_expected = {
'reload': False,
'urls': plugin_2_action_urls,
}
# Test Plugin reload == False on Move
post = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), no_reload_expected)
# Clean up the dynamically registered plugin classes.
plugin_pool.unregister_plugin(ReloadDrivenPlugin)
plugin_pool.unregister_plugin(NonReloadDrivenPlugin)
def test_plugin_copy_with_reload(self):
"""The copy-plugins endpoint's JSON must honour each plugin's 'requires_reload' copy option."""
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
# Two throwaway plugin classes differing only in action_options.
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
plugin_pool.register_plugin(ReloadDrivenPlugin)
plugin_pool.register_plugin(NonReloadDrivenPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
# Test Plugin reload == True on Copy
# (copying the whole placeholder includes the reload-driven plugin)
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], True)
# Test Plugin reload == False on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'source_plugin_id': plugin_2.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], False)
plugin_pool.unregister_plugin(ReloadDrivenPlugin)
plugin_pool.unregister_plugin(NonReloadDrivenPlugin)
def test_custom_plugin_urls(self):
    """A plugin-provided admin URL resolves and serves its custom response."""
    plugin_url = urlresolvers.reverse('admin:dumbfixtureplugin')
    resp = self.client.get(plugin_url)
    self.assertEqual(200, resp.status_code)
    self.assertEqual(b"It works", resp.content)
def test_plugin_require_parent(self):
"""
Assert that a plugin marked as 'require_parent' is not listed
in the plugin pool when a placeholder is specified
"""
ParentRequiredPlugin = type('ParentRequiredPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
plugin_pool.register_plugin(ParentRequiredPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
# Top-level listing for a placeholder must exclude parent-required plugins.
plugin_list = plugin_pool.get_all_plugins(placeholder=placeholder, page=page)
self.assertFalse(ParentRequiredPlugin in plugin_list)
plugin_pool.unregister_plugin(ParentRequiredPlugin)
def test_plugin_parent_classes(self):
"""
Assert that a plugin with a list of parent classes only appears in the
toolbar plugin struct for those given parent Plugins
"""
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['GenericParentPlugin'], render_plugin=False))
GenericParentPlugin = type('GenericParentPlugin', (CMSPluginBase,), {'render_plugin':False})
KidnapperPlugin = type('KidnapperPlugin', (CMSPluginBase,), {'render_plugin':False})
expected_struct = {'module': u'Generic',
'name': u'Parent Classes Plugin',
'value': 'ParentClassesPlugin'}
for plugin in [ParentClassesPlugin, GenericParentPlugin, KidnapperPlugin]:
plugin_pool.register_plugin(plugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
from cms.utils.placeholder import get_toolbar_plugin_struct
# Listed under its declared parent...
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin],
placeholder.slot,
page,
parent=GenericParentPlugin)
self.assertTrue(expected_struct in toolbar_struct)
# ...but not under an unrelated parent.
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin],
placeholder.slot,
page,
parent=KidnapperPlugin)
self.assertFalse(expected_struct in toolbar_struct)
# Without a parent filter, the generic parent itself is listed.
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin, GenericParentPlugin],
placeholder.slot,
page)
expected_struct = {'module': u'Generic',
'name': u'Generic Parent Plugin',
'value': 'GenericParentPlugin'}
self.assertTrue(expected_struct in toolbar_struct)
for plugin in [ParentClassesPlugin, GenericParentPlugin, KidnapperPlugin]:
plugin_pool.unregister_plugin(plugin)
def test_plugin_child_classes_from_settings(self):
"""CMS_PLACEHOLDER_CONF child_classes override the plugin's own declaration."""
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ChildClassesPlugin = type('ChildClassesPlugin', (CMSPluginBase,),
dict(child_classes=['TextPlugin'], render_template='allow_children_plugin.html'))
plugin_pool.register_plugin(ChildClassesPlugin)
plugin = api.add_plugin(placeholder, ChildClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_child_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'child_classes': {
'ChildClassesPlugin': ['LinkPlugin', 'PicturePlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['LinkPlugin', 'PicturePlugin'],
plugin.get_child_classes(placeholder.slot, page))
plugin_pool.unregister_plugin(ChildClassesPlugin)
def test_plugin_parent_classes_from_settings(self):
"""CMS_PLACEHOLDER_CONF parent_classes override the plugin's own declaration."""
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['TextPlugin'], render_plugin=False))
plugin_pool.register_plugin(ParentClassesPlugin)
plugin = api.add_plugin(placeholder, ParentClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
## assert baseline
self.assertEqual(['TextPlugin'], plugin.get_parent_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'parent_classes': {
'ParentClassesPlugin': ['TestPlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['TestPlugin'],
plugin.get_parent_classes(placeholder.slot, page))
plugin_pool.unregister_plugin(ParentClassesPlugin)
def test_plugin_translatable_content_getter_setter(self):
"""
Round-trip a text plugin's body through get_translatable_content()
and set_translatable_content().
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
plugin = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", plugin.body)
# see if the getter works
self.assertEqual({'body': "Hello World"}, plugin.get_translatable_content())
# change the content
self.assertEqual(True, plugin.set_translatable_content({'body': "It works!"}))
# check if it changed
self.assertEqual("It works!", plugin.body)
# double check through the getter
self.assertEqual({'body': "It works!"}, plugin.get_translatable_content())
def test_plugin_pool_register_returns_plugin_class(self):
"""register_plugin used as a decorator must return the class it registered."""
@plugin_pool.register_plugin
class DecoratorTestPlugin(CMSPluginBase):
render_plugin = False
name = "Test Plugin"
# If register_plugin returned None, the name would be unbound/None here.
self.assertIsNotNone(DecoratorTestPlugin)
class FileSystemPluginTests(PluginsTestBaseCase):
    """Plugin tests that touch the real filesystem (collected static/media files)."""

    def setUp(self):
        super(FileSystemPluginTests, self).setUp()
        # Collect static files so plugin media (e.g. file-type icons) exists.
        call_command('collectstatic', interactive=False, verbosity=0, link=True)

    def tearDown(self):
        for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
            for root, dirs, files in os.walk(directory, topdown=False):
                # We need to walk() the directory tree since rmdir() does not allow
                # to remove non-empty directories...
                for name in files:
                    # Start by killing all files we walked
                    os.remove(os.path.join(root, name))
                for name in dirs:
                    # Now all directories we walked...
                    os.rmdir(os.path.join(root, name))
        super(FileSystemPluginTests, self).tearDown()

    def test_fileplugin_icon_uppercase(self):
        """The file plugin must resolve an icon even for upper-case extensions."""
        page = api.create_page('testpage', 'nav_playground.html', 'en')
        body = page.placeholders.get(slot="body")
        plugin = File(
            plugin_type='FilePlugin',
            placeholder=body,
            position=1,
            language=settings.LANGUAGE_CODE,
        )
        # This try/except block allows older and newer versions of the
        # djangocms-file plugin to work here.
        try:
            plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile(
                "UPPERCASE.jpg", b"content"), False)
        except ObjectDoesNotExist:  # catches 'RelatedObjectDoesNotExist'
            plugin.source.save("UPPERCASE.JPG", SimpleUploadedFile(
                "UPPERCASE.jpg", b"content"), False)
        plugin.add_root(instance=plugin)
        # Fix: assertNotEquals is a deprecated unittest alias (removed in
        # Python 3.12); use the canonical assertNotEqual.
        self.assertNotEqual(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
"""Tests for plugins whose model has a many-to-many relation (Article/Section)."""
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# create 3 sections
self.sections = []
self.section_pks = []
for i in range(3):
section = Section.objects.create(name="section %s" % i)
self.sections.append(section)
self.section_pks.append(section.pk)
self.section_count = len(self.sections)
# create 10 articles by section
for section in self.sections:
for j in range(10):
Article.objects.create(
title="article %s" % j,
section=section
)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
def test_dynamic_plugin_template(self):
"""get_render_template() may pick a template per instance (see 'custom template')."""
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="a title")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="custom template")
request = self.get_request(path=page_en.get_absolute_url())
plugins = get_plugins(request, ph_en, page_en.template)
for plugin in plugins:
if plugin.title == 'custom template':
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles_custom.html')
self.assertTrue('Articles Custom template' in plugin.render_plugin({}, ph_en))
else:
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles.html')
self.assertFalse('Articles Custom template' in plugin.render_plugin({}, ph_en))
def test_add_plugin_with_m2m(self):
"""Adding/editing an Article plugin through the admin persists its M2M sections."""
# add a new text plugin
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
page.publish('en')
placeholder = page.placeholders.get(slot="body")
plugin_data = {
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'plugin_parent': '',
'placeholder_id': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
"breadcrumb": [
{
"url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
"title": "Articles"
}
],
'delete': URL_CMS_PLUGIN_DELETE % pk
}
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + str(CMSPlugin.objects.all()[0].pk) + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(self.section_count, plugin.sections.count())
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
# The through-table name is pinned; a rename would break existing DBs.
self.assertEqual(plugin.sections.through._meta.db_table, 'manytomany_rel_articlepluginmodel_sections')
def test_add_plugin_with_m2m_and_publisher(self):
"""Publishing a page must copy the plugin's M2M rows to the public version."""
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
# add a plugin
plugin_data = {
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'plugin_parent': '',
'placeholder_id': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
"breadcrumb": [
{
"url": URL_CMS_PLUGIN_EDIT + "%s/" % pk,
"title": "Articles"
}
],
'delete': URL_CMS_PLUGIN_DELETE % pk
}
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
# there should be only 1 plugin
self.assertEqual(1, CMSPlugin.objects.all().count())
articles_plugin_pk = CMSPlugin.objects.all()[0].pk
self.assertEqual(articles_plugin_pk, CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + str(CMSPlugin.objects.all()[0].pk) + "/"
data = {
'title': "Articles Plugin 1",
'sections': self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, ArticlePluginModel.objects.count())
articles_plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(u'Articles Plugin 1', articles_plugin.title)
self.assertEqual(self.section_count, articles_plugin.sections.count())
# check publish box
page = api.publish_page(page, self.super_user, 'en')
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(2, ArticlePluginModel.objects.all().count())
# Both draft and public copies must carry the full set of sections.
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
expected = [self.section_count for i in range(len(db_counts))]
self.assertEqual(expected, db_counts)
def test_copy_plugin_with_m2m(self):
"""Copying plugins to a second language must duplicate M2M rows as well."""
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = ArticlePluginModel(
plugin_type='ArticlePlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
page_data = self.get_new_page_data()
#create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Page.objects.all().count(), 1)
copy_data = {
'source_placeholder_id': placeholder.pk,
'target_placeholder_id': placeholder.pk,
'target_language': self.SECOND_LANG,
'source_language': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PLUGINS_COPY, copy_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf8').count('"position":'), 1)
# assert copy success
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.count(), 2)
db_counts = [plgn.sections.count() for plgn in ArticlePluginModel.objects.all()]
expected = [self.section_count for _ in range(len(db_counts))]
self.assertEqual(expected, db_counts)
class PluginCopyRelationsTestCase(PluginsTestBaseCase):
"""Test the suggestions in the docs for copy_relations()"""
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# Two unpublished pages, each with a 'body' placeholder to copy between.
page_data1 = self.get_new_page_data_dbfields()
page_data1['published'] = False
self.page1 = api.create_page(**page_data1)
page_data2 = self.get_new_page_data_dbfields()
page_data2['published'] = False
self.page2 = api.create_page(**page_data2)
self.placeholder1 = self.page1.placeholders.get(slot='body')
self.placeholder2 = self.page2.placeholders.get(slot='body')
def test_copy_fk_from_model(self):
"""Publishing must also copy objects that point at the plugin via a FK."""
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithFKFromModel",
language=self.FIRST_LANG,
)
FKModel.objects.create(fk_field=plugin)
old_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = FKModel.objects.filter(
fk_field__placeholder__page__publisher_is_draft=False
).count()
# Exactly one new public-side FK row should exist after publishing.
self.assertEqual(
new_public_count,
old_public_count + 1
)
def test_copy_m2m_to_model(self):
"""Publishing must preserve the plugin's M2M links to external objects."""
plugin = api.add_plugin(
placeholder=self.placeholder1,
plugin_type="PluginWithM2MToModel",
language=self.FIRST_LANG,
)
m2m_target = M2MTargetModel.objects.create()
plugin.m2m_field.add(m2m_target)
old_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
api.publish_page(
self.page1,
self.super_user,
self.FIRST_LANG
)
new_public_count = M2MTargetModel.objects.filter(
pluginmodelwithm2mtomodel__placeholder__page__publisher_is_draft=False
).count()
self.assertEqual(
new_public_count,
old_public_count + 1
)
class PluginsMetaOptionsTests(TestCase):
    """TestCase set for ensuring that bugs like #992 are caught."""

    # These plugins are inlined because, due to the nature of the #992
    # ticket, we cannot actually import a single file with all the
    # plugin variants in, because that calls __new__, at which point the
    # error with splitted occurs.

    def _check_meta(self, model, db_table, app_label):
        # Shared assertion: the model's computed Meta options match.
        self.assertEqual(model._meta.db_table, db_table)
        self.assertEqual(model._meta.app_label, app_label)

    def test_meta_options_as_defaults(self):
        ''' handling when a CMSPlugin meta options are computed defaults '''
        # this plugin relies on the base CMSPlugin and Model classes to
        # decide what the app_label and db_table should be
        self._check_meta(TestPlugin.model, 'meta_testpluginmodel', 'meta')

    def test_meta_options_as_declared_defaults(self):
        ''' handling when a CMSPlugin meta options are declared as per defaults '''
        # here, we declare the db_table and app_label explicitly, but to the same
        # values as would be computed, thus making sure it's not a problem to
        # supply options.
        self._check_meta(TestPlugin2.model, 'meta_testpluginmodel2', 'meta')

    def test_meta_options_custom_app_label(self):
        ''' make sure customised meta options on CMSPlugins don't break things '''
        self._check_meta(TestPlugin3.model, 'one_thing_testpluginmodel3', 'one_thing')

    def test_meta_options_custom_db_table(self):
        ''' make sure custom database table names are OK. '''
        self._check_meta(TestPlugin4.model, 'or_another_4', 'meta')

    def test_meta_options_custom_both(self):
        ''' We should be able to customise app_label and db_table together '''
        self._check_meta(TestPlugin5.model, 'or_another_5', 'one_thing')
class LinkPluginTestCase(PluginsTestBaseCase):
"""Validation tests for the Link plugin's form (URL and target handling)."""
def test_does_not_verify_existance_of_url(self):
# The form must not try to resolve the URL over the network.
form = LinkForm(
{'name': 'Linkname', 'url': 'http://www.nonexistant.test'})
self.assertTrue(form.is_valid())
def test_opens_in_same_window_by_default(self):
"""Could not figure out how to render this plugin
Checking only for the values in the model"""
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test'})
link = form.save()
self.assertEqual(link.target, '')
def test_open_in_blank_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_blank'})
link = form.save()
self.assertEqual(link.target, '_blank')
def test_open_in_parent_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_parent'})
link = form.save()
self.assertEqual(link.target, '_parent')
def test_open_in_top_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_top'})
link = form.save()
self.assertEqual(link.target, '_top')
def test_open_in_nothing_else(self):
# Any target outside the known HTML values must be rejected.
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': 'artificial'})
self.assertFalse(form.is_valid())
class NoDatabasePluginTests(TestCase):
"""Plugin model behaviour that must work without touching the database."""
def test_render_meta_is_unique(self):
# Each plugin class gets its own _render_meta object, not a shared one.
text = Text()
link = Link()
self.assertNotEqual(id(text._render_meta), id(link._render_meta))
def test_render_meta_does_not_leak(self):
# Mutating one class's _render_meta must not affect another class.
text = Text()
link = Link()
text._render_meta.text_enabled = False
link._render_meta.text_enabled = False
self.assertFalse(text._render_meta.text_enabled)
self.assertFalse(link._render_meta.text_enabled)
link._render_meta.text_enabled = True
self.assertFalse(text._render_meta.text_enabled)
self.assertTrue(link._render_meta.text_enabled)
def test_db_table_hack(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin1
self.assertEqual(TestPlugin1._meta.db_table, 'bunch_of_plugins_testplugin1')
def test_db_table_hack_with_mixin(self):
# Plugin models have been moved away due to Django's AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin2
self.assertEqual(TestPlugin2._meta.db_table, 'bunch_of_plugins_testplugin2')
def test_pickle(self):
# __reduce__ must not raise on an unsaved plugin instance.
text = Text()
text.__reduce__()
class PicturePluginTests(PluginsTestBaseCase):
"""Validation tests for the Picture plugin model."""
def test_link_or_page(self):
"""Test a validator: you can enter a url or a page_link, but not both."""
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
picture = Picture(url="test")
# Note: don't call full_clean as it will check ALL fields - including
# the image, which we haven't defined. Call clean() instead which
# just validates the url and page_link fields.
picture.clean()
picture.page_link = page
picture.url = None
picture.clean()
# Setting both url and page_link must fail validation.
picture.url = "test"
self.assertRaises(ValidationError, picture.clean)
class SimplePluginTests(TestCase):
"""Behaviour of a minimal CMSPluginBase subclass (no model, no DB)."""
def test_simple_naming(self):
# The plugin name is derived from the class name when not declared.
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
self.assertEqual(MyPlugin.name, 'My Plugin')
def test_simple_context(self):
# render() must inject instance/placeholder into the same context dict.
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
plugin = MyPlugin(ArticlePluginModel, admin.site)
context = {}
out_context = plugin.render(context, 1, 2)
self.assertEqual(out_context['instance'], 1)
self.assertEqual(out_context['placeholder'], 2)
self.assertIs(out_context, context)
class BrokenPluginTests(TestCase):
"""Plugin discovery must surface real import errors instead of hiding them."""
def test_import_broken_plugin(self):
"""
If there is an import error in the actual cms_plugin file it should
raise the ImportError rather than silently swallowing it -
in opposition to the ImportError if the file 'cms_plugins.py' doesn't
exist.
"""
new_apps = ['cms.test_utils.project.brokenpluginapp']
with self.settings(INSTALLED_APPS=new_apps):
# Force rediscovery so the broken app's cms_plugins module is imported.
plugin_pool.discovered = False
self.assertRaises(ImportError, plugin_pool.discover_plugins)
class MTIPluginsTestCase(PluginsTestBaseCase):
    """Tests for plugins whose model uses multi-table inheritance (MTI)."""

    def test_add_edit_plugin(self):
        """
        Test that we can instantiate and use a MTI plugin
        """
        # Fix: this triple-quoted description used to sit *after* the import,
        # making it a discarded expression statement rather than the method's
        # docstring; it is now the first statement so introspection sees it.
        # Local import: the plugin app's models are only installed in tests.
        from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
        # Create a page
        page_data = self.get_new_page_data()
        self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        # Add the MTI plugin
        plugin_data = {
            'plugin_type': "TestPluginBeta",
            'plugin_language': settings.LANGUAGES[0][0],
            'placeholder_id': page.placeholders.get(slot="body").pk,
            'plugin_parent': '',
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEqual(response.status_code, 200)
        plugin_id = self.get_response_pk(response)
        self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
        # Test we can open the change form for the MTI plugin
        edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
        response = self.client.get(edit_url)
        self.assertEqual(response.status_code, 200)
        # Edit the MTI plugin
        data = {
            "alpha": "ALPHA",
            "beta": "BETA"
        }
        response = self.client.post(edit_url, data)
        self.assertEqual(response.status_code, 200)
        # Test that the change was properly stored in the DB
        plugin_model = TestPluginBetaModel.objects.all()[0]
        self.assertEqual("ALPHA", plugin_model.alpha)
        self.assertEqual("BETA", plugin_model.beta)
|
bsd-3-clause
|
kedz/cuttsum
|
trec2015/sbin/l2s/apsal-dev.py
|
1
|
9067
|
import cuttsum.events
import cuttsum.corpora
from cuttsum.pipeline import InputStreamResource
from cuttsum.classifiers import NuggetRegressor
import cuttsum.judgements
import pandas as pd
import numpy as np
from datetime import datetime
from cuttsum.misc import event2semsim
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import cosine_similarity
import os
def epoch(dt):
    """Convert a naive UTC datetime to whole seconds since the Unix epoch."""
    delta = dt - datetime(1970, 1, 1)
    return int(delta.total_seconds())
matches_df = cuttsum.judgements.get_merged_dataframe()
def get_input_stream(event, gold_probs, extractor="goose", thresh=.8, delay=None, topk=20):
"""Load and annotate the sentence stream for an event.

Returns a DataFrame of candidate sentences with nugget-match
probabilities ("probs"), filtered nugget sets, restricted to
documents whose dedup cluster matched the event query.
If gold_probs is True, gold judgement probabilities are used
instead of the regressor's predictions.
"""
# Sentences matched to more than this many nuggets are treated as noise.
max_nuggets = 3
corpus = cuttsum.corpora.get_raw_corpus(event)
res = InputStreamResource()
df = pd.concat(
res.get_dataframes(event, corpus, extractor, thresh, delay, topk))
# Rows judged with confidence 1 but no per-nugget probs: assign prob 1
# to each matched nugget.
selector = (df["n conf"] == 1) & (df["nugget probs"].apply(len) == 0)
df.loc[selector, "nugget probs"] = df.loc[selector, "nuggets"].apply(lambda x: {n:1 for n in x})
# "true probs" = max per-nugget probability (0 when there are none).
df["true probs"] = df["nugget probs"].apply(lambda x: [val for key, val in x.items()] +[0])
df["true probs"] = df["true probs"].apply(lambda x: np.max(x))
df.loc[(df["n conf"] == 1) & (df["nuggets"].apply(len) == 0), "true probs"] = 0
if gold_probs is True:
df["probs"] = df["true probs"]
else:
df["probs"] = NuggetRegressor().predict(event, df)
# Keep only confidently matched nuggets (prob > .9).
df["nuggets"] = df["nugget probs"].apply(
lambda x: set([key for key, val in x.items() if val > .9]))
# Map each nugget id to its earliest update timestamp so we can drop
# nugget matches that occur before the nugget could have been known.
nid2time = {}
nids = set(matches_df[matches_df["query id"] == event.query_id]["nugget id"].tolist())
for nid in nids:
ts = matches_df[matches_df["nugget id"] == nid]["update id"].apply(lambda x: int(x.split("-")[0])).tolist()
ts.sort()
nid2time[nid] = ts[0]
fltr_nuggets = []
for name, row in df.iterrows():
fltr_nuggets.append(
set([nug for nug in row["nuggets"] if nid2time[nug] <= row["timestamp"]]))
#print df[["nuggets", "timestamp"]].apply(lambda y: print y[0]) # datetime.utcfromtimestamp(int(y["timestamp"])))
#print nids
df["nuggets"] = fltr_nuggets
# Sentences matching too many nuggets are cleared rather than kept.
df["nuggets"] = df["nuggets"].apply(lambda x: x if len(x) <= max_nuggets else set([]))
from cuttsum.pipeline import DedupedArticlesResource
ded = DedupedArticlesResource()
stats_df = ded.get_stats_df(event, corpus, extractor, thresh)
# NOTE(review): "stream ids" is stored as a stringified collection and
# round-tripped via eval() -- safe only for trusted resource files.
stats_df["stream ids"] = stats_df["stream ids"].apply(lambda x: set(eval(x)))
sid2match = {}
for _, row in stats_df.iterrows():
for sid in row["stream ids"]:
sid2match[sid] = row["match"]
all_ts = []
all_docs = []
new_docs = []
# Keep only documents whose dedup cluster matched; also sanity-check
# that the stream is in non-decreasing timestamp order.
for (sid, ts), doc in df.groupby(["stream id", "timestamp"]):
# print sub_doc
if len(all_ts) > 0:
assert ts >= all_ts[-1]
all_ts.append(ts)
if sid2match[sid] is True:
new_docs.append(doc)
all_docs.append(doc)
df = pd.concat(new_docs)
print len(all_docs), len(new_docs)
return df
def main(output_dir, sim_threshold, bucket_size, pref_offset):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dev_qids = set([19, 23, 27, 34, 35])
summary_data = []
K_data = []
for event in cuttsum.events.get_events():
if event.query_num not in dev_qids: continue
print event
semsim = event2semsim(event)
istream = get_input_stream(event, False, extractor="goose",
thresh=.8, delay=None, topk=20)
prev_time = 0
cache = None
clusters = []
max_h = len(event.list_event_hours()) - 1
for h, hour in enumerate(event.list_event_hours()):
if h % bucket_size != 0 and h != max_h:
continue
current_time = epoch(hour)
input_sents = istream[
(istream["timestamp"] < current_time) & \
(istream["timestamp"] >= prev_time)]
len_select = input_sents["lemmas stopped"].apply(len) > 10
input_sents = input_sents[len_select]
if len(input_sents) <= 1: continue
stems = input_sents["stems"].apply(lambda x: ' '.join(x)).tolist()
X = semsim.transform(stems)
probs = input_sents["probs"]
p = probs.values
K = -(1 - cosine_similarity(X))
K_ma = np.ma.masked_array(K, np.eye(K.shape[0]))
Kmin = np.ma.min(K_ma)
Kmax = np.ma.max(K_ma)
median = np.ma.median(K_ma)[0]
pref = np.minimum(p + median, -.05)
print "SYS TIME:", hour, "# SENTS:", K.shape[0],
print "min/median/max pref: {}/{}/{}".format(
pref.min(), np.median(pref), pref.max())
#K_data.append({"min": Kmin, "max": Kmax, "median": median})
K_data.append({"min": (pref).min(), "max": (pref).max(),
"median": np.median((pref))})
#print K
# continue
#
ap = AffinityPropagation(
preference=pref-pref_offset, affinity="precomputed",
verbose=True, max_iter=1000)
ap.fit(K)
# ##print input_sents["pretty text"]
#
labels = ap.labels_
if ap.cluster_centers_indices_ != None:
for c in ap.cluster_centers_indices_:
if cache == None:
cache = X[c]
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
else:
Ksum = cosine_similarity(cache, X[c])
#print "MAX SIM", Ksum.max()
#print input_sents.reset_index(drop=True).iloc[c]["sent text"]
if Ksum.max() < sim_threshold:
cache = np.vstack([cache, X[c]])
updates_df = \
input_sents.reset_index(drop=True).iloc[c]
updates_df["query id"] = event.query_num
updates_df["system timestamp"] = current_time
summary_data.append(
updates_df[
["query id", "stream id", "sent id",
"system timestamp", "sent text"]
].to_frame().T
)
#
# for l, i in enumerate(af.cluster_centers_indices_):
# support = np.sum(labels == l)
# center = input_sents.iloc[i][["update id", "sent text", "pretty text", "stems", "nuggets"]]
# center = center.to_dict()
# center["support"] = support
# center["timestamp"] = current_time
# clusters.append(center)
#
prev_time = current_time
# df = pd.DataFrame(clusters, columns=["update id", "timestamp", "support", "sent text", "pretty text", "stems", "nuggets"])
#
# import os
# dirname = "clusters"
# if not os.path.exists(dirname):
# os.makedirs(dirname)
#
# with open(os.path.join(dirname, "{}.tsv".format(event.query_id)), "w") as f:
# df.to_csv(f, sep="\t", index=False)
#
df = pd.DataFrame(K_data, columns=["min", "max", "median"])
print df
print df.mean()
print df.std()
print df.max()
df = pd.concat(summary_data)
df["conf"] = .5
df["team id"] = "APSAL"
df["run id"] = "sim{}_bs{}_off{}".format(
sim_threshold, bucket_size, pref_offset)
print df
of = os.path.join(output_dir, "apsal" + "sim{}_bs{}_off{}.tsv".format(
sim_threshold, bucket_size, pref_offset))
cols = ["query id", "team id", "run id", "stream id", "sent id",
"system timestamp", "conf"]
df[cols].to_csv(of, sep="\t", header=False, index=False)
if __name__ == "__main__":
    import argparse

    # Command-line front end: all four knobs are mandatory.
    parser = argparse.ArgumentParser()
    parser.add_argument(u"--output-dir", type=str, required=True,
                        help="directory to write results.")
    parser.add_argument(u"--sim-cutoff", type=float, required=True)
    parser.add_argument(u"--bucket-size", type=float, required=True)
    parser.add_argument(u"--pref-offset", type=float, required=True)
    cli_args = parser.parse_args()
    main(cli_args.output_dir, cli_args.sim_cutoff,
         cli_args.bucket_size, cli_args.pref_offset)
|
apache-2.0
|
rtindru/django
|
django/conf/locale/ml/formats.py
|
1007
|
1815
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # Alternative textual formats, disabled by default:
    # '%b %d %Y', '%b %d, %Y',            # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',            # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',            # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',            # '25 October 2006', '25 October, 2006'
]
# Accepted datetime inputs, most specific first within each date style.
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%m/%d/%Y %H:%M:%S',     # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'
    '%m/%d/%Y %H:%M',        # '10/25/2006 14:30'
    '%m/%d/%Y',              # '10/25/2006'
    '%m/%d/%y %H:%M:%S',     # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'
    '%m/%d/%y %H:%M',        # '10/25/06 14:30'
    '%m/%d/%y',              # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
bsd-3-clause
|
loco-odoo/localizacion_co
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons-extra/purchase_requisition_analytic/__init__.py
|
10
|
1507
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import analytic
import purchase_requisition
import hr
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ekasitk/sahara
|
sahara/swift/utils.py
|
1
|
2014
|
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import six
from six.moves.urllib import parse as urlparse
from sahara.utils.openstack import base as clients_base
# Shared oslo.config handle used by the helpers below.
CONF = cfg.CONF

# Scheme prefix identifying swift-internal URLs (e.g. "swift://container/...").
SWIFT_INTERNAL_PREFIX = "swift://"
SWIFT_URL_SUFFIX_START = '.'
# Suffix appended to the container name: ".sahara".
SWIFT_URL_SUFFIX = SWIFT_URL_SUFFIX_START + 'sahara'
def retrieve_auth_url():
    """Return the Keystone auth URL for the Hadoop Swift library.

    The Hadoop Swift library does not support the Keystone v3 API, so the
    v2.0 endpoint is returned unless domain-based proxy users are enabled.
    """
    info = urlparse.urlparse(clients_base.retrieve_auth_url())
    version_path = 'v3/auth' if CONF.use_domain_for_proxy_users else 'v2.0'
    # Re-assemble the URL, keeping the explicit port only when one was given.
    if info.port:
        netloc = '{0}:{1}'.format(info.hostname, info.port)
    else:
        netloc = info.hostname
    return '{0}://{1}/{2}/'.format(info.scheme, netloc, version_path)
def inject_swift_url_suffix(url):
    """Append the Sahara suffix to the container of a swift-internal URL.

    Rewrites ``swift://container/...`` to ``swift://container.sahara/...``
    (only the first occurrence of the netloc is replaced).  Non-string
    values and URLs whose container already ends with the suffix are
    returned unchanged.
    """
    # Use the module constant instead of repeating the "swift://" literal,
    # matching SWIFT_INTERNAL_PREFIX defined above.
    if isinstance(url, six.string_types) and url.startswith(SWIFT_INTERNAL_PREFIX):
        u = urlparse.urlparse(url)
        if not u.netloc.endswith(SWIFT_URL_SUFFIX):
            return url.replace(u.netloc,
                               u.netloc + SWIFT_URL_SUFFIX, 1)
    return url
|
apache-2.0
|
vipulkanade/EventbriteDjango
|
lib/python2.7/encodings/iso8859_10.py
|
593
|
13845
|
""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless iso8859-10 codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no cross-call state."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no cross-call state."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: inherits encode() from Codec, buffering from codecs."""
    pass

class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: inherits decode() from Codec, buffering from codecs."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codec registry uses for iso8859-10."""
    return codecs.CodecInfo(
        name='iso8859-10',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# One unicode character per byte value 0x00-0xFF, emitted by gencodec.py from
# 'MAPPINGS/ISO8859/8859-10.TXT'.  Do not edit entries by hand; regenerate.
decoding_table = (
    u'\x00'     # 0x00 -> NULL
    u'\x01'     # 0x01 -> START OF HEADING
    u'\x02'     # 0x02 -> START OF TEXT
    u'\x03'     # 0x03 -> END OF TEXT
    u'\x04'     # 0x04 -> END OF TRANSMISSION
    u'\x05'     # 0x05 -> ENQUIRY
    u'\x06'     # 0x06 -> ACKNOWLEDGE
    u'\x07'     # 0x07 -> BELL
    u'\x08'     # 0x08 -> BACKSPACE
    u'\t'       # 0x09 -> HORIZONTAL TABULATION
    u'\n'       # 0x0A -> LINE FEED
    u'\x0b'     # 0x0B -> VERTICAL TABULATION
    u'\x0c'     # 0x0C -> FORM FEED
    u'\r'       # 0x0D -> CARRIAGE RETURN
    u'\x0e'     # 0x0E -> SHIFT OUT
    u'\x0f'     # 0x0F -> SHIFT IN
    u'\x10'     # 0x10 -> DATA LINK ESCAPE
    u'\x11'     # 0x11 -> DEVICE CONTROL ONE
    u'\x12'     # 0x12 -> DEVICE CONTROL TWO
    u'\x13'     # 0x13 -> DEVICE CONTROL THREE
    u'\x14'     # 0x14 -> DEVICE CONTROL FOUR
    u'\x15'     # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     # 0x16 -> SYNCHRONOUS IDLE
    u'\x17'     # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18'     # 0x18 -> CANCEL
    u'\x19'     # 0x19 -> END OF MEDIUM
    u'\x1a'     # 0x1A -> SUBSTITUTE
    u'\x1b'     # 0x1B -> ESCAPE
    u'\x1c'     # 0x1C -> FILE SEPARATOR
    u'\x1d'     # 0x1D -> GROUP SEPARATOR
    u'\x1e'     # 0x1E -> RECORD SEPARATOR
    u'\x1f'     # 0x1F -> UNIT SEPARATOR
    u' '        # 0x20 -> SPACE
    u'!'        # 0x21 -> EXCLAMATION MARK
    u'"'        # 0x22 -> QUOTATION MARK
    u'#'        # 0x23 -> NUMBER SIGN
    u'$'        # 0x24 -> DOLLAR SIGN
    u'%'        # 0x25 -> PERCENT SIGN
    u'&'        # 0x26 -> AMPERSAND
    u"'"        # 0x27 -> APOSTROPHE
    u'('        # 0x28 -> LEFT PARENTHESIS
    u')'        # 0x29 -> RIGHT PARENTHESIS
    u'*'        # 0x2A -> ASTERISK
    u'+'        # 0x2B -> PLUS SIGN
    u','        # 0x2C -> COMMA
    u'-'        # 0x2D -> HYPHEN-MINUS
    u'.'        # 0x2E -> FULL STOP
    u'/'        # 0x2F -> SOLIDUS
    u'0'        # 0x30 -> DIGIT ZERO
    u'1'        # 0x31 -> DIGIT ONE
    u'2'        # 0x32 -> DIGIT TWO
    u'3'        # 0x33 -> DIGIT THREE
    u'4'        # 0x34 -> DIGIT FOUR
    u'5'        # 0x35 -> DIGIT FIVE
    u'6'        # 0x36 -> DIGIT SIX
    u'7'        # 0x37 -> DIGIT SEVEN
    u'8'        # 0x38 -> DIGIT EIGHT
    u'9'        # 0x39 -> DIGIT NINE
    u':'        # 0x3A -> COLON
    u';'        # 0x3B -> SEMICOLON
    u'<'        # 0x3C -> LESS-THAN SIGN
    u'='        # 0x3D -> EQUALS SIGN
    u'>'        # 0x3E -> GREATER-THAN SIGN
    u'?'        # 0x3F -> QUESTION MARK
    u'@'        # 0x40 -> COMMERCIAL AT
    u'A'        # 0x41 -> LATIN CAPITAL LETTER A
    u'B'        # 0x42 -> LATIN CAPITAL LETTER B
    u'C'        # 0x43 -> LATIN CAPITAL LETTER C
    u'D'        # 0x44 -> LATIN CAPITAL LETTER D
    u'E'        # 0x45 -> LATIN CAPITAL LETTER E
    u'F'        # 0x46 -> LATIN CAPITAL LETTER F
    u'G'        # 0x47 -> LATIN CAPITAL LETTER G
    u'H'        # 0x48 -> LATIN CAPITAL LETTER H
    u'I'        # 0x49 -> LATIN CAPITAL LETTER I
    u'J'        # 0x4A -> LATIN CAPITAL LETTER J
    u'K'        # 0x4B -> LATIN CAPITAL LETTER K
    u'L'        # 0x4C -> LATIN CAPITAL LETTER L
    u'M'        # 0x4D -> LATIN CAPITAL LETTER M
    u'N'        # 0x4E -> LATIN CAPITAL LETTER N
    u'O'        # 0x4F -> LATIN CAPITAL LETTER O
    u'P'        # 0x50 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x51 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x52 -> LATIN CAPITAL LETTER R
    u'S'        # 0x53 -> LATIN CAPITAL LETTER S
    u'T'        # 0x54 -> LATIN CAPITAL LETTER T
    u'U'        # 0x55 -> LATIN CAPITAL LETTER U
    u'V'        # 0x56 -> LATIN CAPITAL LETTER V
    u'W'        # 0x57 -> LATIN CAPITAL LETTER W
    u'X'        # 0x58 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x5A -> LATIN CAPITAL LETTER Z
    u'['        # 0x5B -> LEFT SQUARE BRACKET
    u'\\'       # 0x5C -> REVERSE SOLIDUS
    u']'        # 0x5D -> RIGHT SQUARE BRACKET
    u'^'        # 0x5E -> CIRCUMFLEX ACCENT
    u'_'        # 0x5F -> LOW LINE
    u'`'        # 0x60 -> GRAVE ACCENT
    u'a'        # 0x61 -> LATIN SMALL LETTER A
    u'b'        # 0x62 -> LATIN SMALL LETTER B
    u'c'        # 0x63 -> LATIN SMALL LETTER C
    u'd'        # 0x64 -> LATIN SMALL LETTER D
    u'e'        # 0x65 -> LATIN SMALL LETTER E
    u'f'        # 0x66 -> LATIN SMALL LETTER F
    u'g'        # 0x67 -> LATIN SMALL LETTER G
    u'h'        # 0x68 -> LATIN SMALL LETTER H
    u'i'        # 0x69 -> LATIN SMALL LETTER I
    u'j'        # 0x6A -> LATIN SMALL LETTER J
    u'k'        # 0x6B -> LATIN SMALL LETTER K
    u'l'        # 0x6C -> LATIN SMALL LETTER L
    u'm'        # 0x6D -> LATIN SMALL LETTER M
    u'n'        # 0x6E -> LATIN SMALL LETTER N
    u'o'        # 0x6F -> LATIN SMALL LETTER O
    u'p'        # 0x70 -> LATIN SMALL LETTER P
    u'q'        # 0x71 -> LATIN SMALL LETTER Q
    u'r'        # 0x72 -> LATIN SMALL LETTER R
    u's'        # 0x73 -> LATIN SMALL LETTER S
    u't'        # 0x74 -> LATIN SMALL LETTER T
    u'u'        # 0x75 -> LATIN SMALL LETTER U
    u'v'        # 0x76 -> LATIN SMALL LETTER V
    u'w'        # 0x77 -> LATIN SMALL LETTER W
    u'x'        # 0x78 -> LATIN SMALL LETTER X
    u'y'        # 0x79 -> LATIN SMALL LETTER Y
    u'z'        # 0x7A -> LATIN SMALL LETTER Z
    u'{'        # 0x7B -> LEFT CURLY BRACKET
    u'|'        # 0x7C -> VERTICAL LINE
    u'}'        # 0x7D -> RIGHT CURLY BRACKET
    u'~'        # 0x7E -> TILDE
    u'\x7f'     # 0x7F -> DELETE
    u'\x80'     # 0x80 -> <control>
    u'\x81'     # 0x81 -> <control>
    u'\x82'     # 0x82 -> <control>
    u'\x83'     # 0x83 -> <control>
    u'\x84'     # 0x84 -> <control>
    u'\x85'     # 0x85 -> <control>
    u'\x86'     # 0x86 -> <control>
    u'\x87'     # 0x87 -> <control>
    u'\x88'     # 0x88 -> <control>
    u'\x89'     # 0x89 -> <control>
    u'\x8a'     # 0x8A -> <control>
    u'\x8b'     # 0x8B -> <control>
    u'\x8c'     # 0x8C -> <control>
    u'\x8d'     # 0x8D -> <control>
    u'\x8e'     # 0x8E -> <control>
    u'\x8f'     # 0x8F -> <control>
    u'\x90'     # 0x90 -> <control>
    u'\x91'     # 0x91 -> <control>
    u'\x92'     # 0x92 -> <control>
    u'\x93'     # 0x93 -> <control>
    u'\x94'     # 0x94 -> <control>
    u'\x95'     # 0x95 -> <control>
    u'\x96'     # 0x96 -> <control>
    u'\x97'     # 0x97 -> <control>
    u'\x98'     # 0x98 -> <control>
    u'\x99'     # 0x99 -> <control>
    u'\x9a'     # 0x9A -> <control>
    u'\x9b'     # 0x9B -> <control>
    u'\x9c'     # 0x9C -> <control>
    u'\x9d'     # 0x9D -> <control>
    u'\x9e'     # 0x9E -> <control>
    u'\x9f'     # 0x9F -> <control>
    u'\xa0'     # 0xA0 -> NO-BREAK SPACE
    u'\u0104'   # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
    u'\u0112'   # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
    u'\u0122'   # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
    u'\u012a'   # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
    u'\u0128'   # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
    u'\u0136'   # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
    u'\xa7'     # 0xA7 -> SECTION SIGN
    u'\u013b'   # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
    u'\u0110'   # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
    u'\u0160'   # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
    u'\u0166'   # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
    u'\u017d'   # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
    u'\xad'     # 0xAD -> SOFT HYPHEN
    u'\u016a'   # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
    u'\u014a'   # 0xAF -> LATIN CAPITAL LETTER ENG
    u'\xb0'     # 0xB0 -> DEGREE SIGN
    u'\u0105'   # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
    u'\u0113'   # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
    u'\u0123'   # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
    u'\u012b'   # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
    u'\u0129'   # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
    u'\u0137'   # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
    u'\xb7'     # 0xB7 -> MIDDLE DOT
    u'\u013c'   # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
    u'\u0111'   # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
    u'\u0161'   # 0xBA -> LATIN SMALL LETTER S WITH CARON
    u'\u0167'   # 0xBB -> LATIN SMALL LETTER T WITH STROKE
    u'\u017e'   # 0xBC -> LATIN SMALL LETTER Z WITH CARON
    u'\u2015'   # 0xBD -> HORIZONTAL BAR
    u'\u016b'   # 0xBE -> LATIN SMALL LETTER U WITH MACRON
    u'\u014b'   # 0xBF -> LATIN SMALL LETTER ENG
    u'\u0100'   # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
    u'\xc1'     # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2'     # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc3'     # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc4'     # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5'     # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc6'     # 0xC6 -> LATIN CAPITAL LETTER AE
    u'\u012e'   # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
    u'\u010c'   # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
    u'\xc9'     # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\u0118'   # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
    u'\xcb'     # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\u0116'   # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
    u'\xcd'     # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce'     # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf'     # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\xd0'     # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
    u'\u0145'   # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
    u'\u014c'   # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
    u'\xd3'     # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4'     # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd5'     # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xd6'     # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\u0168'   # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
    u'\xd8'     # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
    u'\u0172'   # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
    u'\xda'     # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb'     # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc'     # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xdd'     # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\xde'     # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
    u'\xdf'     # 0xDF -> LATIN SMALL LETTER SHARP S (German)
    u'\u0101'   # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
    u'\xe1'     # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe2'     # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe3'     # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe4'     # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe5'     # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe6'     # 0xE6 -> LATIN SMALL LETTER AE
    u'\u012f'   # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
    u'\u010d'   # 0xE8 -> LATIN SMALL LETTER C WITH CARON
    u'\xe9'     # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    u'\u0119'   # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
    u'\xeb'     # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\u0117'   # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
    u'\xed'     # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee'     # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef'     # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xf0'     # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
    u'\u0146'   # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
    u'\u014d'   # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
    u'\xf3'     # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf4'     # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf5'     # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
    u'\xf6'     # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\u0169'   # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
    u'\xf8'     # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
    u'\u0173'   # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
    u'\xfa'     # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    u'\xfb'     # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc'     # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xfd'     # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
    u'\xfe'     # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
    u'\u0138'   # 0xFF -> LATIN SMALL LETTER KRA
)

### Encoding table
# Inverse mapping (character -> byte), derived from the table above.
encoding_table=codecs.charmap_build(decoding_table)
|
mit
|
domenicosolazzo/practice-django
|
venv/lib/python2.7/distutils/__init__.py
|
1211
|
3983
|
import os
import sys
import warnings
import imp
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
# Kept as a module attribute in case external code imported it from here.
dirname = os.path.dirname

# Locate the *real* distutils package next to the stdlib (opcode lives there).
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
    # Fix: the original call never interpolated distutils_path, so the
    # warning printed a literal "%s".
    warnings.warn(
        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?"
        % distutils_path)
else:
    # Graft the real package's search path onto this shim and load it under
    # an alias so its attributes can be mirrored.
    __path__.insert(0, distutils_path)
    real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
    # Copy the relevant attributes
    try:
        __revision__ = real_distutils.__revision__
    except AttributeError:
        # Not all Python versions expose __revision__.
        pass
    __version__ = real_distutils.__version__
from distutils import dist, sysconfig
try:
    basestring
except NameError:
    # Python 3 removed basestring; str is the right target for the
    # isinstance() checks below.
    basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
    from distutils.command.build_ext import build_ext as old_build_ext

    class build_ext(old_build_ext):
        """build_ext that also searches the base interpreter's Libs dir."""

        def finalize_options (self):
            if self.library_dirs is None:
                self.library_dirs = []
            elif isinstance(self.library_dirs, basestring):
                # Accept a path-separated string as well as a list.
                self.library_dirs = self.library_dirs.split(os.pathsep)
            # sys.real_prefix is set by virtualenv and points at the base
            # interpreter, where the import libraries actually live.
            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
            old_build_ext.finalize_options(self)

    # Replace the class in the command module so distutils picks it up.
    from distutils.command import build_ext as build_ext_module
    build_ext_module.build_ext = build_ext
## distutils.dist patches:

old_find_config_files = dist.Distribution.find_config_files

def find_config_files(self):
    """Wrap Distribution.find_config_files to prefer the virtualenv's
    per-user config over any pydistutils.cfg found by the stock lookup."""
    found = old_find_config_files(self)
    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
    #if os.path.exists(system_distutils):
    #    found.insert(0, system_distutils)
    # What to call the per-user config file
    if os.name == 'posix':
        user_filename = ".pydistutils.cfg"
    else:
        user_filename = "pydistutils.cfg"
    # Look for it inside the virtualenv (sys.prefix), not the home dir.
    user_filename = os.path.join(sys.prefix, user_filename)
    if os.path.isfile(user_filename):
        # Drop any pydistutils.cfg the stock lookup found, then use ours.
        for item in list(found):
            if item.endswith('pydistutils.cfg'):
                found.remove(item)
        found.append(user_filename)
    return found

dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
# Inside a virtualenv, sys.prefix points at the virtualenv itself.  These
# wrappers redirect sysconfig's default prefix to sys.real_prefix (the base
# interpreter, set by virtualenv) so headers and libraries resolve correctly.

old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
    if prefix is None:
        # Default to the base interpreter's include directory.
        prefix = sys.real_prefix
    return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc

old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
    # Only standard-library queries are redirected; site-packages lookups
    # keep resolving inside the virtualenv.
    if standard_lib and prefix is None:
        prefix = sys.real_prefix
    return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib

old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
    real_vars = old_get_config_vars(*args)
    if sys.platform == 'win32':
        # Windows keeps import libraries under <real prefix>\libs, which
        # stock distutils does not know about inside a virtualenv.
        lib_dir = os.path.join(sys.real_prefix, "libs")
        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
            real_vars['LIBDIR'] = lib_dir # asked for all
        elif isinstance(real_vars, list) and 'LIBDIR' in args:
            real_vars = real_vars + [lib_dir] # asked for list
    return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
|
mit
|
codrut3/tensorflow
|
tensorflow/contrib/specs/python/params_ops.py
|
186
|
3104
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow parameter specifications.
This module is used as an environment for evaluating expressions
in the "params" DSL.
Specifications are intended to assign simple numerical
values. Examples:
--params "n=64; d=5" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
The random parameter primitives are useful for running large numbers
of experiments with randomly distributed parameters:
--params "n=Li(5,500); d=Ui(1,5)" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
Internally, this might be implemented as follows:
params = specs.create_params(FLAGS.params, {})
logging.info(repr(params))
net = specs.create_net(FLAGS.spec, inputs, params)
Note that separating the specifications into parameters and network
creation allows us to log the random parameter values easily.
The implementation of this will change soon in order to support
hyperparameter tuning with steering. Instead of returning a number,
the primitives below will return a class instance that is then
used to generate a random number by the framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Lint disabled because these are operators in the DSL, not regular
# Python functions.
# pylint: disable=invalid-name
# pylint: disable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: disable=redefined-builtin,g-importing-member,no-member
# make available all math expressions
import math
from math import *
import random
# pylint: enable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: enable=redefined-builtin,g-importing-member,no-member
def Uf(lo=0.0, hi=1.0):
  """Draw a float uniformly at random from the interval [lo, hi]."""
  # Same draw as random.uniform(lo, hi), spelled out explicitly.
  return lo + (hi - lo) * random.random()
def Ui(lo, hi):
  """Draw an integer uniformly at random from [lo, hi], both ends included."""
  # Equivalent to random.randint(lo, hi).
  return random.randrange(lo, hi + 1)
def Lf(lo, hi):
  """Draw a floating point number log-uniformly from [lo, hi]."""
  log_lo = math.log(lo)
  log_hi = math.log(hi)
  return math.exp(random.uniform(log_lo, log_hi))
def Li(lo, hi):
  """Draw an integer log-uniformly from [lo, hi], both ends included."""
  # The 1e-5 nudge keeps exp() strictly below hi+1 so floor lands in range.
  upper = math.log(hi + 1 - 1e-5)
  draw = random.uniform(math.log(lo), upper)
  return int(math.floor(math.exp(draw)))
def Nt(mu, sigma, limit=3.0):
  """Gaussian draw, clipped to within `limit` standard deviations of mu."""
  draw = random.gauss(mu, sigma)
  lo_clip = mu - limit * sigma
  hi_clip = mu + limit * sigma
  return min(max(draw, lo_clip), hi_clip)
# pylint: enable=invalid-name
|
apache-2.0
|
linucks/ample
|
ample/ensembler/truncation_util.py
|
1
|
17337
|
"""Truncation utility module"""
__author__ = "Jens Thomas, and Felix Simkovic"
__date__ = "10 Jul 2018"
__version__ = "1.0"
import collections
from enum import Enum
import logging
import os
import sys
from ample.ensembler._ensembler import model_core_from_fasta
from ample.util import ample_util, pdb_edit, theseus
logger = logging.getLogger(__name__)
MIN_CHUNK = 3 # Theseus needs at least 3 residues in order to work
class TRUNCATION_METHODS(Enum):
    """Supported strategies for selecting residues to truncate."""
    # Values are the user-facing strings (note the British "focussed").
    FOCUSED = 'focussed'
    PERCENT = 'percent'
    PERCENT_FIXED = 'percent_fixed_intervals'
    SCORES = 'scores'

# Data structure to store residue information: positional index, PDB residue
# sequence number, and per-residue variance from the superposition.
ScoreVariances = collections.namedtuple("ScoreVariances", ["idx", "resSeq", "variance"])
def calculate_residues_focussed(var_by_res):
    """Truncation levels biased towards small (< 40 residue) models.

    Success is most likely in the 5-40 residue interval, so at least half of
    the 20 truncation levels are forced into it: for sequences of 80 residues
    or fewer the sequence is simply split into 20 even chunks; otherwise the
    first 40 residues are split into ten 4-residue chunks and the remainder
    into ten even chunks.

    Note: sorts var_by_res in place by variance (least variable first).
    """
    nresidues = len(var_by_res)
    if nresidues <= 80:
        # Short sequence: plain 5% intervals already give 20 levels.
        return calculate_residues_by_percent(var_by_res, percent_truncation=5)
    var_by_res.sort(key=lambda sv: sv.variance)
    low_len = 40
    # Ten chunks inside the first 40 residues ...
    low_starts = _split_sequence(low_len, 10)
    # ... and ten even chunks over the rest, offset past the low interval.
    high_starts = [s + low_len for s in _split_sequence(nresidues - low_len, 10)]
    start_indexes = high_starts + low_starts
    # Express each start point as a percentage of the full sequence.
    truncation_levels = [int(round(float(s + 1) / float(nresidues) * 100)) for s in start_indexes]
    idx_list = [sv.idx for sv in var_by_res]
    resseq_list = [sv.resSeq for sv in var_by_res]
    variance_list = [sv.variance for sv in var_by_res]
    truncation_residue_idxs = []
    truncation_residues = []
    truncation_variances = []
    for s in start_indexes:
        truncation_residue_idxs.append(sorted(idx_list[:s + 1]))
        truncation_residues.append(sorted(resseq_list[:s + 1]))
        # Threshold is the variance of the most variable retained residue.
        truncation_variances.append(variance_list[s])
    return truncation_levels, truncation_variances, truncation_residues, truncation_residue_idxs
def _calculate_start_indexes_from_fixed_percentages(percent_fixed_intervals, length, all_indexes):
    """Map each requested percentage to the index in all_indexes where that
    truncation bin starts, dropping duplicates while preserving order."""

    def _nearest_valid_index(target_size, candidates):
        # Choose the candidate whose residue count (idx + 1) is closest to
        # target_size while keeping at least MIN_CHUNK residues; ties are
        # resolved in favour of the later candidate (<= keeps updating).
        best_dist = sys.maxsize
        best = None
        for candidate in candidates:
            dist = abs(target_size - (candidate + 1))
            if dist <= best_dist and candidate + 1 >= MIN_CHUNK:
                best_dist = dist
                best = candidate
        assert best is not None
        return best

    starts = []
    for percent in percent_fixed_intervals:
        target_size = (length * int(percent)) / 100
        start = _nearest_valid_index(target_size, all_indexes)
        if start not in starts:
            starts.append(start)
    return starts
def calculate_residues_by_percent(var_by_res, percent_truncation=None, percent_fixed_intervals=None):
    """Select the residues to keep at a series of percentage truncation levels.

    Residues are ordered from least to most variable and cut at start indexes
    derived either from percent_fixed_intervals (explicit percentage bins) or
    from percent_truncation (evenly spaced bins via _split_sequence).

    Args:
        var_by_res: objects with idx, resSeq and variance attributes (e.g.
            ScoreVariances).  NOTE: sorted in place by variance.
        percent_truncation: even interval (percent) between truncation levels;
            used when percent_fixed_intervals is not given.
        percent_fixed_intervals: list of ints in (0, 100] giving explicit
            truncation percentages.

    Returns:
        Tuple (truncation_levels, truncation_variances, truncation_residues,
        truncation_residue_idxs), one entry per distinct truncation level.

    Raises:
        RuntimeError: if percent_fixed_intervals is supplied but malformed.
    """
    if percent_fixed_intervals:
        # Validate: non-empty list of percentages in (0, 100].
        if not (
            isinstance(percent_fixed_intervals, list)
            and len(percent_fixed_intervals) > 0
            and min(percent_fixed_intervals) > 0
            and max(percent_fixed_intervals) <= 100
        ):
            raise RuntimeError("Incorrect percent_fixed_intervals argument: {}".format(percent_fixed_intervals))
    # Get list of residue indices sorted by variance - from least to most
    var_by_res.sort(key=lambda x: x.variance, reverse=False)
    # indexes correspond to the number of the residue - 1
    var_by_res_data = [(x.idx, x.resSeq, x.variance) for x in var_by_res]
    all_indexes, all_resseq, variances = zip(*var_by_res_data)
    length = len(var_by_res)
    if percent_fixed_intervals:
        start_idxs = _calculate_start_indexes_from_fixed_percentages(percent_fixed_intervals, length, all_indexes)
    else:
        start_idxs = _split_sequence(length, percent_truncation, min_chunk=MIN_CHUNK)
    # Get list of residues to keep under the different intervals
    truncation_levels = []
    truncation_variances = []
    truncation_residues = []
    truncation_residue_idxs = []
    for start in start_idxs:
        percent = int(round(float(start + 1) / float(length) * 100))
        if percent in truncation_levels:
            # Rounding can map two start indexes to the same level; keep one.
            continue
        residues = all_resseq[: start + 1]
        idxs = all_indexes[: start + 1]
        thresh = variances[start]  # For the threshold we take the variance of the most variable residue
        truncation_variances.append(thresh)
        truncation_levels.append(percent)
        truncation_residues.append(sorted(residues))
        truncation_residue_idxs.append(sorted(idxs))
    return truncation_levels, truncation_variances, truncation_residues, truncation_residue_idxs
def prune_residues(residues, chunk_size=1, allowed_gap=2):
    """Remove any residues that are < chunk_size where the gap before and after is > allowed_gap.

    Parameters
    ----------
    residues : sorted list of residue numbers
    chunk_size : runs of consecutive residues of this length or shorter are removal candidates
    allowed_gap : a candidate run is removed only when the numbering gaps on both sides
                  are at least this big

    Returns
    -------
    tuple : (kept_residues, removed_residues) - removed_residues is None when nothing was pruned.
    """
    assert chunk_size > 0 and allowed_gap > 0, "chunk_size and allowed_gap must be > 0!: {0} {1}".format(
        chunk_size, allowed_gap
    )
    if not len(residues):
        return residues, None
    lenr = len(residues)
    if lenr <= chunk_size:
        # Whole list is no bigger than a removable chunk - prune everything
        return [], residues
    # Build up a list of residues to remove
    to_remove = []
    start = residues[0]
    last = residues[0]
    this_residue = None
    last_chunk_end = residues[0] - (allowed_gap + 1)  # make sure starting gap is bigger than allowed
    idxLast = lenr - 1
    # FIX: original mixed Python-2-only xrange (removed in Python 3) with range
    # elsewhere in the body; range iterates identically here in both versions.
    for i in range(1, idxLast + 1):
        this_residue = residues[i]
        if i == idxLast or this_residue != last + 1:
            # A chunk has just ended (numbering gap) and/or we hit the end of the list
            if i == idxLast and this_residue != last + 1:
                # Trailing singleton: examine the final residue as its own chunk
                start = this_residue
                last_chunk_end = last
                last = this_residue
                postgap = allowed_gap + 1
            elif i == idxLast and this_residue == last + 1:
                # Final residue extends the current chunk; no residues follow it
                last = this_residue
                postgap = allowed_gap + 1
            elif i != idxLast and this_residue != last + 1:
                postgap = (this_residue - last) - 1
            pregap = (start - last_chunk_end) - 1
            this_chunk_size = (last - start) + 1
            # remove if it satisfies the requirements
            if this_chunk_size <= chunk_size and pregap >= allowed_gap and postgap >= allowed_gap:
                to_remove += list(range(start, last + 1))
            # reset start and last_chunk_end
            start = this_residue
            last_chunk_end = last
        last = this_residue
    # Remove the chunks and return
    if len(to_remove):
        return [r for r in residues if r not in to_remove], to_remove
    return residues, None
def _split_sequence(length, percent_interval, min_chunk=3):
"""Split a sequence of length into chunks each separated by percent_interval each being at least min_chunk size"""
if length <= min_chunk:
return [length - 1]
# How many residues should fit in each bin?
chunk_size = int(round(float(length) * float(percent_interval) / 100.0))
if chunk_size <= 0:
return [length - 1]
idxs = [length - 1]
while True:
start = idxs[-1] - chunk_size
if start <= 0:
break
remainder = start + 1
if remainder >= min_chunk:
idxs.append(start)
else:
break
return idxs
class Truncation(object):
    """Holds information relating to a single truncation of a cluster of models"""

    def __init__(self):
        # All attributes default to None until populated by the Truncator
        self.cluster = None  # The cluster object this truncation was created from
        self.directory = None
        self.level = None
        self.method = None
        self.models = None
        self.percent = None
        self.residues = None
        self.residues_idxs = None
        self.variances = None

    @property
    def num_residues(self):
        """Number of residues kept by this truncation (0 when unset)."""
        if self.residues is None:
            return 0
        return len(self.residues)

    def __str__(self):
        """Return a string representation of this object."""
        lines = [super(Truncation, self).__str__()]
        # List all attributes alphabetically for reproducible output
        for attr in sorted(self.__dict__.keys()):
            lines.append("{0} : {1}".format(attr, self.__dict__[attr]))
        return "\n".join(lines) + "\n"
class Truncator(object):
    """Takes one or more models and truncates them based on a supplied or generated metric."""

    def __init__(self, work_dir):
        """Class to take one or more models and truncate them based on a supplied or generated metric"""
        self.work_dir = work_dir
        self.models = None
        self.aligned_models = None
        self.truncations = None
        # Path to the theseus executable - must be set by the caller before use
        self.theseus_exe = None
        # We keep these for bookeeping as they go in the ample dictionary
        self.truncation_levels = None
        self.truncation_variances = None
        self.truncation_nresidues = None

    def calculate_truncations(
        self,
        models=None,
        truncation_method=None,
        percent_truncation=None,
        percent_fixed_intervals=None,
        truncation_pruning=None,
        residue_scores=None,
        alignment_file=None,
        homologs=False,
    ):
        """Returns a list of Truncation objects, one for each truncation level.
        This method doesn't do any truncating - it just calculates the data for each truncation level.

        Side effects: chdir's into self.work_dir; sets self.models, self.aligned_models
        and the bookkeeping attributes truncation_levels/variances/nresidues.
        Returns [] when theseus superposition fails.
        """
        assert len(models) > 1 or residue_scores, "Cannot truncate as < 2 models!"
        assert truncation_method and percent_truncation, "Missing arguments: {0} : {1}".format(
            truncation_method, percent_truncation
        )
        assert ample_util.is_exe(self.theseus_exe), "Cannot find theseus_exe: {0}".format(self.theseus_exe)
        # Create the directories we'll be working in
        assert self.work_dir and os.path.isdir(self.work_dir), "truncate_models needs a self.work_dir"
        os.chdir(self.work_dir)
        self.models = models
        # Calculate variances between pdb and align them (we currently only require the aligned models for homologs)
        if truncation_method != TRUNCATION_METHODS.SCORES:
            run_theseus = theseus.Theseus(work_dir=self.work_dir, theseus_exe=self.theseus_exe)
            try:
                run_theseus.superpose_models(self.models, homologs=homologs, alignment_file=alignment_file)
                self.aligned_models = run_theseus.aligned_models
            except RuntimeError as e:
                # Superposition failure is fatal for this cluster - signal with an empty list
                logger.critical(e)
                return []
        if homologs:
            # If using homologs, now trim down to the core. We only do this here so that we are using the aligned models from
            # theseus, which makes it easier to see what the truncation is doing.
            models = model_core_from_fasta(
                self.aligned_models, alignment_file=alignment_file, work_dir=os.path.join(self.work_dir, 'core_models')
            )
            # Unfortunately Theseus doesn't print all residues in its output format, so we can't use the variances we calculated before and
            # need to calculate the variances of the core models
            try:
                run_theseus.superpose_models(models, homologs=homologs, basename='homologs_core')
                self.models = run_theseus.aligned_models
                self.aligned_models = run_theseus.aligned_models
            except RuntimeError as e:
                logger.critical(e)
                return []
        # Pick the per-residue variance source: external scores or theseus output
        if truncation_method == TRUNCATION_METHODS.SCORES:
            var_by_res = self._convert_residue_scores(residue_scores)
        else:
            var_by_res = run_theseus.var_by_res
        if len(var_by_res) <= 0:
            raise RuntimeError("Error reading residue variances!")
        logger.info('Using truncation method: %s', truncation_method)
        # Calculate which residues to keep under the different methods
        if truncation_method in [
            TRUNCATION_METHODS.PERCENT,
            TRUNCATION_METHODS.PERCENT_FIXED,
            TRUNCATION_METHODS.SCORES,
        ]:
            truncation_levels, truncation_variances, truncation_residues, truncation_residue_idxs = calculate_residues_by_percent(
                var_by_res, percent_truncation=percent_truncation, percent_fixed_intervals=percent_fixed_intervals
            )
        elif truncation_method == TRUNCATION_METHODS.FOCUSED:
            truncation_levels, truncation_variances, truncation_residues, truncation_residue_idxs = calculate_residues_focussed(
                var_by_res
            )
        else:
            raise RuntimeError("Unrecognised ensembling mode: {}".format(truncation_method))
        # Somewhat of a hack to save the data so we can put it in the amoptd
        self.truncation_levels = truncation_levels
        self.truncation_variances = truncation_variances
        self.truncation_nresidues = [len(r) for r in truncation_residues]
        truncations = []
        for tlevel, tvar, tresidues, tresidue_idxs in zip(
            truncation_levels, truncation_variances, truncation_residues, truncation_residue_idxs
        ):
            # Prune singletone/doubletone etc. residues if required
            if truncation_pruning == 'single':
                logger.debug("truncation_pruning: %s", truncation_pruning)
                tresidue_idxs, pruned_residues = prune_residues(tresidue_idxs, chunk_size=1, allowed_gap=2)
                if pruned_residues:
                    logger.debug("prune_residues removing: %s", pruned_residues)
            elif truncation_pruning is None:
                pass
            else:
                raise RuntimeError("Unrecognised truncation_pruning: {0}".format(truncation_pruning))
            # Skip if there are no residues
            if not tresidue_idxs:
                logger.debug("Skipping truncation level %s with variance %s as no residues", tlevel, tvar)
                continue
            truncation = Truncation()
            truncation.method = truncation_method
            truncation.percent = percent_truncation
            truncation.level = tlevel
            truncation.variances = tvar
            truncation.residues = tresidues
            truncation.residues_idxs = tresidue_idxs
            truncations.append(truncation)
        return truncations

    def truncate_models(
        self,
        models,
        max_cluster_size=200,
        truncation_method=None,
        percent_truncation=None,
        percent_fixed_intervals=None,
        truncation_pruning=None,
        residue_scores=None,
        homologs=False,
        alignment_file=None,
        work_dir=None,
    ):
        """Generate a set of Truncation objects, referencing a set of truncated models generated from the supplied models"""
        truncations = self.calculate_truncations(
            models=models,
            truncation_method=truncation_method,
            percent_truncation=percent_truncation,
            percent_fixed_intervals=percent_fixed_intervals,
            truncation_pruning=truncation_pruning,
            residue_scores=residue_scores,
            alignment_file=alignment_file,
            homologs=homologs,
        )
        if truncations is None or len(truncations) < 1:
            logger.critical("Unable to truncate the ensembles - no viable truncations")
            return []
        # Loop through the Truncation objects, truncating the models based on the truncation data and adding
        # the truncated models to the Truncation.models attribute
        for truncation in truncations:
            # One output directory per truncation level, e.g. tlevel_50
            truncation.directory = os.path.join(self.work_dir, 'tlevel_{0}'.format(truncation.level))
            os.mkdir(truncation.directory)
            logger.info('Truncating at: %s in directory %s', truncation.level, truncation.directory)
            truncation.models = []
            for infile in self.models:
                pdbout = ample_util.filename_append(infile, str(truncation.level), directory=truncation.directory)
                # Loop through PDB files and create new ones that only contain the residues left after truncation
                pdb_edit.select_residues(pdbin=infile, pdbout=pdbout, tokeep_idx=truncation.residues_idxs)
                truncation.models.append(pdbout)
        self.truncations = truncations
        return truncations

    @staticmethod
    def _convert_residue_scores(residue_scores):
        """Create named tuple to match store residue data"""
        scores = [
            ScoreVariances(idx=int(res) - 1, resSeq=int(res), variance=float(sco))  # Required to match Theseus
            for (res, sco) in residue_scores
        ]
        return scores
|
bsd-3-clause
|
hizardapp/Hizard
|
hyrodactil/customisable_emails/views.py
|
1
|
1940
|
from django.core.urlresolvers import reverse_lazy
from django.template import Context, Template, TemplateSyntaxError
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import View
from braces.views import LoginRequiredMixin, AjaxResponseMixin, JSONResponseMixin
from core.views import RestrictedListView, RestrictedUpdateView, MessageMixin
from .models import EmailTemplate
from .forms import EmailTemplateForm
# Sample values substituted into customisable email templates when
# previewing/test-rendering them (see the views below).
TEST_CONTEXT = Context(dict(
    applicant_first_name=mark_safe(u"Quentin"),
    applicant_last_name=mark_safe(u"Potter"),
    opening=mark_safe("Professor of magic"),
    company=mark_safe("Magic & Co.")))
class CustomisableEmailsListView(LoginRequiredMixin, RestrictedListView):
    """List EmailTemplate objects (access restricted via RestrictedListView)."""
    model = EmailTemplate
class CustomisableEmailsUpdateView(LoginRequiredMixin, MessageMixin, RestrictedUpdateView):
    """Edit a single customisable email template."""
    model = EmailTemplate
    form_class = EmailTemplateForm
    success_url = reverse_lazy("customisable_emails:list")
    success_message = _('Email edited')

    def get_context_data(self, **kwargs):
        """Expose the sample rendering context so the edit page can show a preview."""
        ctx = super(CustomisableEmailsUpdateView, self).get_context_data(**kwargs)
        ctx["TEST_CONTEXT"] = TEST_CONTEXT
        return ctx
class TestEmailTemplateRendererView(
    LoginRequiredMixin,
    AjaxResponseMixin,
    JSONResponseMixin,
    View
):
    """AJAX endpoint that renders a draft subject/body against the sample context."""

    @staticmethod
    def _render_or_error(source):
        # On a template syntax error we return the error text so the caller
        # can display what went wrong instead of a rendered string.
        try:
            return Template(source).render(TEST_CONTEXT)
        except TemplateSyntaxError as e:
            return str(e)

    def post_ajax(self, request, *args, **kwargs):
        """Render the posted 'subject' and 'body' templates and return them as JSON."""
        subject = self._render_or_error(request.POST.get("subject"))
        body = self._render_or_error(request.POST.get("body"))
        return self.render_json_response(dict(subject=subject, body=body))
|
mit
|
knowledgepoint-devs/askbot-devel
|
askbot/management/commands/createsuperuser.py
|
6
|
4589
|
from django.contrib.auth.management.commands.createsuperuser import *
from django.db.models.signals import pre_save, post_save
class Command(Command):
    """Askbot-specific override of Django's createsuperuser command.

    Behaves like the stock command but disconnects model signals before
    creating the user and marks the new superuser with status 'd'.
    Written for Python 2 (uses raw_input).
    """

    def handle(self, *args, **options):
        username = options.get('username', None)
        email = options.get('email', None)
        interactive = options.get('interactive')
        verbosity = int(options.get('verbosity', 1))
        # Do quick and dirty validation if --noinput
        if not interactive:
            if not username or not email:
                raise CommandError("You must use --username and --email with --noinput.")
            if not RE_VALID_USERNAME.match(username):
                raise CommandError("Invalid username. Use only letters, digits, and underscores")
            try:
                is_valid_email(email)
            except exceptions.ValidationError:
                raise CommandError("Invalid email address.")
        # If not provided, create the user with an unusable password
        password = None
        # Try to determine the current system user's username to use as a default.
        try:
            default_username = getpass.getuser().replace(' ', '').lower()
        except (ImportError, KeyError):
            # KeyError will be raised by os.getpwuid() (called by getuser())
            # if there is no corresponding entry in the /etc/passwd file
            # (a very restricted chroot environment, for example).
            default_username = ''
        # Determine whether the default username is taken, so we don't display
        # it as an option.
        if default_username:
            try:
                User.objects.get(username=default_username)
            except User.DoesNotExist:
                pass
            else:
                default_username = ''
        # Prompt for username/email/password. Enclose this whole thing in a
        # try/except to trap for a keyboard interrupt and exit gracefully.
        if interactive:
            try:
                # Get a username; loop until it is valid and not already taken
                while 1:
                    if not username:
                        input_msg = 'Username'
                        if default_username:
                            input_msg += ' (Leave blank to use %r)' % default_username
                        username = raw_input(input_msg + ': ')
                        if default_username and username == '':
                            username = default_username
                    if not RE_VALID_USERNAME.match(username):
                        sys.stderr.write("Error: That username is invalid. Use only letters, digits and underscores.\n")
                        username = None
                        continue
                    try:
                        User.objects.get(username=username)
                    except User.DoesNotExist:
                        # Username is free - accept it
                        break
                    else:
                        sys.stderr.write("Error: That username is already taken.\n")
                        username = None
                # Get an email; loop until it passes validation
                while 1:
                    if not email:
                        email = raw_input('E-mail address: ')
                    try:
                        is_valid_email(email)
                    except exceptions.ValidationError:
                        sys.stderr.write("Error: That e-mail address is invalid.\n")
                        email = None
                    else:
                        break
                # Get a password; loop until both entries match and are non-blank
                while 1:
                    if not password:
                        password = getpass.getpass()
                        password2 = getpass.getpass('Password (again): ')
                        if password != password2:
                            sys.stderr.write("Error: Your passwords didn't match.\n")
                            password = None
                            continue
                        if password.strip() == '':
                            sys.stderr.write("Error: Blank passwords aren't allowed.\n")
                            password = None
                            continue
                    break
            except KeyboardInterrupt:
                sys.stderr.write("\nOperation cancelled.\n")
                sys.exit(1)
        # Detach model signal handlers so user creation does not trigger them
        self.remove_signals()
        u = User.objects.create_superuser(username, email, password)
        # 'd' status - askbot-specific user status flag set on the new superuser
        u.set_status('d')
        if verbosity >= 1:
            self.stdout.write("Askbot Superuser created successfully.\n")

    def remove_signals(self):
        # Drop ALL pre/post-save receivers globally - intentional here so no
        # side-effect handlers run while creating the superuser
        pre_save.receivers = []
        post_save.receivers = []
|
gpl-3.0
|
gamahead/nupic
|
nupic/encoders/scalar.py
|
8
|
25295
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numbers
import capnp
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaType
from nupic.bindings.math import SM32, GetNTAReal
from nupic.encoders.base import Encoder, EncoderResult
# Sentinel defaults meaning "not specified" - exactly one of n/radius/resolution
# must be given a non-zero value by the caller (see ScalarEncoder._initEncoder).
DEFAULT_RADIUS = 0
DEFAULT_RESOLUTION = 0
class ScalarEncoder(Encoder):
"""
A scalar encoder encodes a numeric (floating point) value into an array
of bits. The output is 0's except for a contiguous block of 1's. The
location of this contiguous block varies continuously with the input value.
The encoding is linear. If you want a nonlinear encoding, just transform
the scalar (e.g. by applying a logarithm function) before encoding.
It is not recommended to bin the data as a pre-processing step, e.g.
"1" = $0 - $.20, "2" = $.21-$0.80, "3" = $.81-$1.20, etc. as this
removes a lot of information and prevents nearby values from overlapping
in the output. Instead, use a continuous transformation that scales
the data (a piecewise transformation is fine).
Parameters:
-----------------------------------------------------------------------------
w -- The number of bits that are set to encode a single value - the
"width" of the output signal
restriction: w must be odd to avoid centering problems.
minval -- The minimum value of the input signal.
maxval -- The upper bound of the input signal
periodic -- If true, then the input value "wraps around" such that minval = maxval
For a periodic value, the input must be strictly less than maxval,
otherwise maxval is a true upper bound.
There are three mutually exclusive parameters that determine the overall size of
of the output. Only one of these should be specifed to the constructor:
n -- The number of bits in the output. Must be greater than or equal to w
radius -- Two inputs separated by more than the radius have non-overlapping
representations. Two inputs separated by less than the radius will
in general overlap in at least some of their bits. You can think
of this as the radius of the input.
resolution -- Two inputs separated by greater than, or equal to the resolution are guaranteed
to have different representations.
Note: radius and resolution are specified w.r.t the input, not output. w is
specified w.r.t. the output.
Example:
day of week.
w = 3
Minval = 1 (Monday)
Maxval = 8 (Monday)
periodic = true
n = 14
[equivalently: radius = 1.5 or resolution = 0.5]
The following values would encode midnight -- the start of the day
monday (1) -> 11000000000001
tuesday(2) -> 01110000000000
wednesday(3) -> 00011100000000
...
sunday (7) -> 10000000000011
Since the resolution is 12 hours, we can also encode noon, as
monday noon -> 11100000000000
monday midnt-> 01110000000000
tuesday noon -> 00111000000000
etc.
It may not be natural to specify "n", especially with non-periodic
data. For example, consider encoding an input with a range of 1-10
(inclusive) using an output width of 5. If you specify resolution =
1, this means that inputs of 1 and 2 have different outputs, though
they overlap, but 1 and 1.5 might not have different outputs.
This leads to a 14-bit representation like this:
1 -> 11111000000000 (14 bits total)
2 -> 01111100000000
...
10-> 00000000011111
[resolution = 1; n=14; radius = 5]
You could specify resolution = 0.5, which gives
1 -> 11111000... (22 bits total)
1.5 -> 011111.....
2.0 -> 0011111....
[resolution = 0.5; n=22; radius=2.5]
You could specify radius = 1, which gives
1 -> 111110000000.... (50 bits total)
2 -> 000001111100....
3 -> 000000000011111...
...
10 -> .....000011111
[radius = 1; resolution = 0.2; n=50]
An N/M encoding can also be used to encode a binary value,
where we want more than one bit to represent each state.
For example, we could have: w = 5, minval = 0, maxval = 1,
radius = 1 (which is equivalent to n=10)
0 -> 1111100000
1 -> 0000011111
Implementation details:
--------------------------------------------------------------------------
range = maxval - minval
h = (w-1)/2 (half-width)
resolution = radius / w
n = w * range/radius (periodic)
n = w * range/radius + 2 * h (non-periodic)
"""
  def __init__(self,
               w,
               minval,
               maxval,
               periodic=False,
               n=0,
               radius=DEFAULT_RADIUS,
               resolution=DEFAULT_RESOLUTION,
               name=None,
               verbosity=0,
               clipInput=False,
               forced=False):
    """
    w -- number of bits to set in output
    minval -- minimum input value
    maxval -- maximum input value (input is strictly less if periodic == True)

    Exactly one of n, radius, resolution must be set. "0" is a special
    value that means "not set".

    n -- number of bits in the representation (must be > w)
    radius -- inputs separated by more than, or equal to this distance will have non-overlapping
              representations
    resolution -- inputs separated by more than, or equal to this distance will have different
              representations
    name -- an optional string which will become part of the description
    clipInput -- if true, non-periodic inputs smaller than minval or greater
              than maxval will be clipped to minval/maxval
    forced -- if true, skip some safety checks (for compatibility reasons), default false

    See class documentation for more information.
    """
    assert isinstance(w, numbers.Integral)
    self.encoders = None
    self.verbosity = verbosity
    self.w = w
    # w must be odd so that the encoding has a well-defined centre bit
    if (w % 2 == 0):
      raise Exception("Width must be an odd number (%f)" % w)
    self.minval = minval
    self.maxval = maxval
    self.periodic = periodic
    self.clipInput = clipInput
    # Number of bits on either side of the centre bit (Python 2 integer
    # division since w is integral and odd)
    self.halfwidth = (w - 1) / 2
    # For non-periodic inputs, padding is the number of bits "outside" the range,
    # on each side. I.e. the representation of minval is centered on some bit, and
    # there are "padding" bits to the left of that centered bit; similarly with
    # bits to the right of the center bit of maxval
    if self.periodic:
      self.padding = 0
    else:
      self.padding = self.halfwidth
    if (minval is not None and maxval is not None):
      if (minval >= maxval):
        raise Exception("The encoder for %s is invalid. minval %s is greater than "
                        "or equal to maxval %s. minval must be strictly less "
                        "than maxval." % (name, minval, maxval))
      self.rangeInternal = float(self.maxval - self.minval)
    # There are three different ways of thinking about the representation. Handle
    # each case here.
    self._initEncoder(w, minval, maxval, n, radius, resolution)
    # nInternal represents the output area excluding the possible padding on each
    # side
    if (minval is not None and maxval is not None):
      self.nInternal = self.n - 2 * self.padding
    # Our name
    if name is not None:
      self.name = name
    else:
      self.name = "[%s:%s]" % (self.minval, self.maxval)
    # This matrix is used for the topDownCompute. We build it the first time
    # topDownCompute is called
    self._topDownMappingM = None
    self._topDownValues = None
    # This list is created by getBucketValues() the first time it is called,
    # and re-created whenever our buckets would be re-arranged.
    self._bucketValues = None
    # checks for likely mistakes in encoder settings
    if not forced:
      self._checkReasonableSettings()
  def _initEncoder(self, w, minval, maxval, n, radius, resolution):
    """ (helper function) There are three different ways of thinking about the representation.
    Handle each case here.

    Sets self.n, self.radius, self.resolution (and self.range when min/max are
    known) from whichever one of n / radius / resolution was supplied.
    Raises when more than one, or none, of them is set.
    """
    if n != 0:
      # Caller fixed the output width directly - derive resolution and radius
      if (radius !=0 or resolution != 0):
        raise ValueError("Only one of n/radius/resolution can be specified for a ScalarEncoder")
      assert n > w
      self.n = n
      if (minval is not None and maxval is not None):
        if not self.periodic:
          self.resolution = float(self.rangeInternal) / (self.n - self.w)
        else:
          self.resolution = float(self.rangeInternal) / (self.n)
        self.radius = self.w * self.resolution
        if self.periodic:
          self.range = self.rangeInternal
        else:
          self.range = self.rangeInternal + self.resolution
    else:
      # Caller supplied radius or resolution - derive the other, then n
      if radius != 0:
        if (resolution != 0):
          raise ValueError("Only one of radius/resolution can be specified for a ScalarEncoder")
        self.radius = radius
        self.resolution = float(self.radius) / w
      elif resolution != 0:
        self.resolution = float(resolution)
        self.radius = self.resolution * self.w
      else:
        raise Exception("One of n, radius, resolution must be specified for a ScalarEncoder")
      if (minval is not None and maxval is not None):
        if self.periodic:
          self.range = self.rangeInternal
        else:
          self.range = self.rangeInternal + self.resolution
        # Round the output width up so the whole range is representable
        nfloat = self.w * (self.range / self.radius) + 2 * self.padding
        self.n = int(math.ceil(nfloat))
  def _checkReasonableSettings(self):
    """(helper function) check if the settings are reasonable for SP to work"""
    # checks for likely mistakes in encoder settings
    # NOTE(review): the guard rejects w < 21 although the message says the hard
    # minimum is "> 2" - the stricter recommended value is enforced unless the
    # caller passed forced=True in the constructor.
    if self.w < 21:
      raise ValueError("Number of bits in the SDR (%d) must be greater than 2, and recommended >= 21 (use forced=True to override)"
                       % self.w)
  def getDecoderOutputFieldTypes(self):
    """ [Encoder class virtual method override]

    Decoded output of a scalar encoder is always a single float field.
    """
    return (FieldMetaType.float, )
  def getWidth(self):
    # Total number of bits in the encoded output
    return self.n
  def _recalcParams(self):
    """Recompute resolution/radius/range after minval or maxval have changed,
    keeping self.n fixed."""
    self.rangeInternal = float(self.maxval - self.minval)
    if not self.periodic:
      self.resolution = float(self.rangeInternal) / (self.n - self.w)
    else:
      self.resolution = float(self.rangeInternal) / (self.n)
    self.radius = self.w * self.resolution
    if self.periodic:
      self.range = self.rangeInternal
    else:
      self.range = self.rangeInternal + self.resolution
    # NOTE(review): this assigns a *local* variable that is immediately
    # discarded - it was presumably meant to update self.name with the new
    # min/max; confirm intent before changing.
    name = "[%s:%s]" % (self.minval, self.maxval)
  def getDescription(self):
    # Single sub-field starting at offset 0
    return [(self.name, 0)]
  def _getFirstOnBit(self, input):
    """ Return the bit offset of the first bit to be set in the encoder output.
    For periodic encoders, this can be a negative number when the encoded output
    wraps around.

    Returns [None] for the missing-data sentinel. Raises for out-of-range input
    unless clipInput is set (non-periodic only).
    """
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    else:
      if input < self.minval:
        # Don't clip periodic inputs. Out-of-range input is always an error
        if self.clipInput and not self.periodic:
          if self.verbosity > 0:
            print "Clipped input %s=%.2f to minval %.2f" % (self.name, input,
                                                            self.minval)
          input = self.minval
        else:
          raise Exception('input (%s) less than range (%s - %s)' %
                          (str(input), str(self.minval), str(self.maxval)))
      if self.periodic:
        # Don't clip periodic inputs. Out-of-range input is always an error
        if input >= self.maxval:
          raise Exception('input (%s) greater than periodic range (%s - %s)' %
                          (str(input), str(self.minval), str(self.maxval)))
      else:
        if input > self.maxval:
          if self.clipInput:
            if self.verbosity > 0:
              print "Clipped input %s=%.2f to maxval %.2f" % (self.name, input,
                                                              self.maxval)
            input = self.maxval
          else:
            raise Exception('input (%s) greater than range (%s - %s)' %
                            (str(input), str(self.minval), str(self.maxval)))
      # Locate the bin holding the centre of the representation, then back off
      # by halfwidth to get the first ON bit
      if self.periodic:
        centerbin = int((input - self.minval) * self.nInternal / self.range) \
                    + self.padding
      else:
        centerbin = int(((input - self.minval) + self.resolution/2) \
                        / self.resolution ) + self.padding
      # We use the first bit to be set in the encoded output as the bucket index
      minbin = centerbin - self.halfwidth
      return [minbin]
  def getBucketIndices(self, input):
    """ See method description in base.py """
    # NaN is treated the same as the missing-data sentinel
    if type(input) is float and math.isnan(input):
      input = SENTINEL_VALUE_FOR_MISSING_DATA
    if input == SENTINEL_VALUE_FOR_MISSING_DATA:
      return [None]
    minbin = self._getFirstOnBit(input)[0]
    # For periodic encoders, the bucket index is the index of the center bit
    if self.periodic:
      bucketIdx = minbin + self.halfwidth
      # Wrap negative offsets back into [0, n)
      if bucketIdx < 0:
        bucketIdx += self.n
    # for non-periodic encoders, the bucket index is the index of the left bit
    else:
      bucketIdx = minbin
    return [bucketIdx]
  def encodeIntoArray(self, input, output, learn=True):
    """ See method description in base.py

    Writes the encoding of `input` into the first self.n elements of `output`
    in place. Missing data (sentinel or NaN) produces all zeros.
    """
    if input is not None and not isinstance(input, numbers.Number):
      raise TypeError(
          "Expected a scalar input but got input of type %s" % type(input))
    # NaN is treated the same as the missing-data sentinel
    if type(input) is float and math.isnan(input):
      input = SENTINEL_VALUE_FOR_MISSING_DATA
    # Get the bucket index to use
    bucketIdx = self._getFirstOnBit(input)[0]
    if bucketIdx is None:
      # None is returned for missing value
      output[0:self.n] = 0  #TODO: should all 1s, or random SDR be returned instead?
    else:
      # The bucket index is the index of the first bit to set in the output
      output[:self.n] = 0
      minbin = bucketIdx
      maxbin = minbin + 2*self.halfwidth
      if self.periodic:
        # Handle the edges by computing wrap-around
        if maxbin >= self.n:
          bottombins = maxbin - self.n + 1
          output[:bottombins] = 1
          maxbin = self.n - 1
        if minbin < 0:
          topbins = -minbin
          output[self.n - topbins:self.n] = 1
          minbin = 0
      assert minbin >= 0
      assert maxbin < self.n
      # set the output (except for periodic wraparound)
      output[minbin:maxbin + 1] = 1
    # Debug the decode() method
    if self.verbosity >= 2:
      print
      print "input:", input
      print "range:", self.minval, "-", self.maxval
      print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
            "radius", self.radius, "periodic:", self.periodic
      print "output:",
      self.pprint(output)
      print "input desc:", self.decodedToStr(self.decode(output))
  def decode(self, encoded, parentFieldName=''):
    """ See the function description in base.py

    Maps an encoded bit array back to the input-space range(s) it could
    represent. Returns ({fieldName: (ranges, description)}, [fieldName]),
    or (dict(), []) when no bits are on.
    """
    # For now, we simply assume any top-down output greater than 0
    # is ON. Eventually, we will probably want to incorporate the strength
    # of each top-down output.
    tmpOutput = numpy.array(encoded[:self.n] > 0).astype(encoded.dtype)
    if not tmpOutput.any():
      return (dict(), [])
    # ------------------------------------------------------------------------
    # First, assume the input pool is not sampled 100%, and fill in the
    # "holes" in the encoded representation (which are likely to be present
    # if this is a coincidence that was learned by the SP).
    # Search for portions of the output that have "holes"
    maxZerosInARow = self.halfwidth
    for i in xrange(maxZerosInARow):
      # Pattern "1 0...0 1" with i+1 zeros between the ones
      searchStr = numpy.ones(i + 3, dtype=encoded.dtype)
      searchStr[1:-1] = 0
      subLen = len(searchStr)
      # Does this search string appear in the output?
      if self.periodic:
        for j in xrange(self.n):
          outputIndices = numpy.arange(j, j + subLen)
          outputIndices %= self.n
          if numpy.array_equal(searchStr, tmpOutput[outputIndices]):
            tmpOutput[outputIndices] = 1
      else:
        for j in xrange(self.n - subLen + 1):
          if numpy.array_equal(searchStr, tmpOutput[j:j + subLen]):
            tmpOutput[j:j + subLen] = 1
    if self.verbosity >= 2:
      print "raw output:", encoded[:self.n]
      print "filtered output:", tmpOutput
    # ------------------------------------------------------------------------
    # Find each run of 1's.
    nz = tmpOutput.nonzero()[0]
    runs = []  # will be tuples of (startIdx, runLength)
    run = [nz[0], 1]
    i = 1
    while (i < len(nz)):
      if nz[i] == run[0] + run[1]:
        # Consecutive ON bit extends the current run
        run[1] += 1
      else:
        runs.append(run)
        run = [nz[i], 1]
      i += 1
    runs.append(run)
    # If we have a periodic encoder, merge the first and last run if they
    # both go all the way to the edges
    if self.periodic and len(runs) > 1:
      if runs[0][0] == 0 and runs[-1][0] + runs[-1][1] == self.n:
        runs[-1][1] += runs[0][1]
        runs = runs[1:]
    # ------------------------------------------------------------------------
    # Now, for each group of 1's, determine the "left" and "right" edges, where
    # the "left" edge is inset by halfwidth and the "right" edge is inset by
    # halfwidth.
    # For a group of width w or less, the "left" and "right" edge are both at
    # the center position of the group.
    ranges = []
    for run in runs:
      (start, runLen) = run
      if runLen <= self.w:
        left = right = start + runLen / 2
      else:
        left = start + self.halfwidth
        right = start + runLen - 1 - self.halfwidth
      # Convert to input space.
      if not self.periodic:
        inMin = (left - self.padding) * self.resolution + self.minval
        inMax = (right - self.padding) * self.resolution + self.minval
      else:
        inMin = (left - self.padding) * self.range / self.nInternal + self.minval
        inMax = (right - self.padding) * self.range / self.nInternal + self.minval
      # Handle wrap-around if periodic
      if self.periodic:
        if inMin >= self.maxval:
          inMin -= self.range
          inMax -= self.range
      # Clip low end
      if inMin < self.minval:
        inMin = self.minval
      if inMax < self.minval:
        inMax = self.minval
      # If we have a periodic encoder, and the max is past the edge, break into
      # 2 separate ranges
      if self.periodic and inMax >= self.maxval:
        ranges.append([inMin, self.maxval])
        ranges.append([self.minval, inMax - self.range])
      else:
        # Clip high end
        if inMax > self.maxval:
          inMax = self.maxval
        if inMin > self.maxval:
          inMin = self.maxval
        ranges.append([inMin, inMax])
    desc = self._generateRangeDescription(ranges)
    # Return result
    if parentFieldName != '':
      fieldName = "%s.%s" % (parentFieldName, self.name)
    else:
      fieldName = self.name
    return ({fieldName: (ranges, desc)}, [fieldName])
def _generateRangeDescription(self, ranges):
"""generate description from a text description of the ranges"""
desc = ""
numRanges = len(ranges)
for i in xrange(numRanges):
if ranges[i][0] != ranges[i][1]:
desc += "%.2f-%.2f" % (ranges[i][0], ranges[i][1])
else:
desc += "%.2f" % (ranges[i][0])
if i < numRanges - 1:
desc += ", "
return desc
def _getTopDownMapping(self):
    """ Return the interal _topDownMappingM matrix used for handling the
    bucketInfo() and topDownCompute() methods. This is a matrix, one row per
    category (bucket) where each row contains the encoded output for that
    category.
    """
    # Do we need to build up our reverse mapping table? It is built lazily
    # on first use and cached on the instance.
    if self._topDownMappingM is None:
        # The input scalar value corresponding to each possible output encoding
        if self.periodic:
            # Periodic: representative values sit at bucket centers, so start
            # half a resolution above minval and stop before maxval.
            self._topDownValues = numpy.arange(self.minval + self.resolution / 2.0,
                                               self.maxval,
                                               self.resolution)
        else:
            # Number of values is (max-min)/resolution; adding half a
            # resolution to the stop makes the sequence include maxval.
            self._topDownValues = numpy.arange(self.minval,
                                               self.maxval + self.resolution / 2.0,
                                               self.resolution)
        # Each row represents an encoded output pattern
        numCategories = len(self._topDownValues)
        self._topDownMappingM = SM32(numCategories, self.n)  # sparse matrix, one row per bucket
        outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())
        for i in xrange(numCategories):
            value = self._topDownValues[i]
            # Clamp the representative value into the legal input range
            # before encoding it.
            value = max(value, self.minval)
            value = min(value, self.maxval)
            self.encodeIntoArray(value, outputSpace, learn=False)
            self._topDownMappingM.setRowFromDense(i, outputSpace)
    return self._topDownMappingM
def getBucketValues(self):
    """Return the list of input values represented by each bucket; see the
    description in base.py."""
    # Built lazily on first access and cached on the instance thereafter.
    if self._bucketValues is None:
        mapping = self._getTopDownMapping()
        self._bucketValues = [
            self.getBucketInfo([bucketIdx])[0].value
            for bucketIdx in range(mapping.nRows())
        ]
    return self._bucketValues
def getBucketInfo(self, buckets):
    """ See the function description in base.py """
    # Get/generate the topDown mapping table.
    # NOTE: although variable topDownMappingM is unused, some (bad-style)
    # actions are executed during _getTopDownMapping() -- it populates
    # self._topDownMappingM, which is read below -- so this line must stay
    # here.
    topDownMappingM = self._getTopDownMapping()
    # The "category" is simply the bucket index
    category = buckets[0]
    encoding = self._topDownMappingM.getRow(category)
    # Which input value does this correspond to?
    if self.periodic:
        # Periodic buckets are centered, so offset by half a resolution.
        inputVal = (self.minval + (self.resolution / 2.0) +
                    (category * self.resolution))
    else:
        inputVal = self.minval + (category * self.resolution)
    return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]
def topDownCompute(self, encoded):
    """Map an encoded output back to its best-matching bucket; see the
    function description in base.py.
    """
    # Make sure the bucket -> encoding table exists, then pick the bucket
    # whose stored encoding overlaps the input vector the most.
    mapping = self._getTopDownMapping()
    overlaps = mapping.rightVecProd(encoded)
    bestCategory = overlaps.argmax()
    return self.getBucketInfo([bestCategory])
def closenessScores(self, expValues, actValues, fractional=True):
    """Score how close the first actual value is to the first expected
    value; see the function description in base.py.

    With ``fractional`` True the score is 1.0 for a perfect match scaled
    down to 0.0 for maximal error; otherwise the raw absolute error is
    returned.
    """
    expected, actual = expValues[0], actValues[0]
    # For periodic encoders, compare positions on the circle.
    if self.periodic:
        expected = expected % self.maxval
        actual = actual % self.maxval
    err = abs(expected - actual)
    if self.periodic:
        # Wrap-around: distance on a circle never exceeds half the period.
        err = min(err, self.maxval - err)
    if not fractional:
        return numpy.array([err])
    pctErr = min(1.0, float(err) / (self.maxval - self.minval))
    return numpy.array([1.0 - pctErr])
def dump(self):
    """Print the encoder's configuration to stdout (debugging aid)."""
    # Fix: the original used Python-2-only "print expr" statements; the
    # parenthesized single-argument form below behaves identically under
    # both Python 2 and Python 3.
    print("ScalarEncoder:")
    print(" min: %f" % self.minval)
    print(" max: %f" % self.maxval)
    print(" w: %d" % self.w)
    print(" n: %d" % self.n)
    print(" resolution: %f" % self.resolution)
    print(" radius: %f" % self.radius)
    print(" periodic: %s" % self.periodic)
    print(" nInternal: %d" % self.nInternal)
    print(" rangeInternal: %f" % self.rangeInternal)
    print(" padding: %d" % self.padding)
@classmethod
def read(cls, proto):
    """Deserialize a ScalarEncoder from a capnp proto object.

    Only one of ``n`` / ``radius`` / ``resolution`` is authoritative: when
    ``n`` was serialized, radius and resolution fall back to their defaults
    so the constructor re-derives them from ``n``.
    """
    if proto.n is not None:
        radius = DEFAULT_RADIUS
        resolution = DEFAULT_RESOLUTION
    else:
        radius = proto.radius
        resolution = proto.resolution
    # Fix: radius/resolution were computed above but never passed to the
    # constructor, so deserialized encoders silently lost them. They are
    # now forwarded explicitly.
    return cls(w=proto.w,
               minval=proto.minval,
               maxval=proto.maxval,
               periodic=proto.periodic,
               n=proto.n,
               radius=radius,
               resolution=resolution,
               name=proto.name,
               verbosity=proto.verbosity,
               clipInput=proto.clipInput,
               forced=True)  # forced: skip parameter sanity checks on restore
def write(self, proto):
    """Serialize this encoder's state onto a capnp proto object."""
    # Radius and resolution are intentionally not written: they can be
    # recalculated based on n when the encoder is read back.
    for attr in ("w", "minval", "maxval", "periodic", "n", "name",
                 "verbosity", "clipInput"):
        setattr(proto, attr, getattr(self, attr))
|
gpl-3.0
|
jamespcole/home-assistant
|
homeassistant/components/weather/__init__.py
|
10
|
5177
|
"""Weather component that handles meteorological data for your location."""
from datetime import timedelta
import logging
from homeassistant.const import PRECISION_TENTHS, PRECISION_WHOLE, TEMP_CELSIUS
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
_LOGGER = logging.getLogger(__name__)
ATTR_CONDITION_CLASS = 'condition_class'
ATTR_FORECAST = 'forecast'
ATTR_FORECAST_CONDITION = 'condition'
ATTR_FORECAST_PRECIPITATION = 'precipitation'
ATTR_FORECAST_TEMP = 'temperature'
ATTR_FORECAST_TEMP_LOW = 'templow'
ATTR_FORECAST_TIME = 'datetime'
ATTR_FORECAST_WIND_BEARING = 'wind_bearing'
ATTR_FORECAST_WIND_SPEED = 'wind_speed'
ATTR_WEATHER_ATTRIBUTION = 'attribution'
ATTR_WEATHER_HUMIDITY = 'humidity'
ATTR_WEATHER_OZONE = 'ozone'
ATTR_WEATHER_PRESSURE = 'pressure'
ATTR_WEATHER_TEMPERATURE = 'temperature'
ATTR_WEATHER_VISIBILITY = 'visibility'
ATTR_WEATHER_WIND_BEARING = 'wind_bearing'
ATTR_WEATHER_WIND_SPEED = 'wind_speed'
DOMAIN = 'weather'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
    """Set up the weather component.

    Creates the domain's EntityComponent and stores it on ``hass.data`` so
    config-entry setup/unload can reach it later. Always returns True.
    """
    component = hass.data[DOMAIN] = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    await component.async_setup(config)
    return True
async def async_setup_entry(hass, entry):
    """Set up a config entry by forwarding it to the domain's EntityComponent."""
    return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
    """Unload a config entry by forwarding it to the domain's EntityComponent."""
    return await hass.data[DOMAIN].async_unload_entry(entry)
class WeatherEntity(Entity):
    """ABC for weather data.

    Platforms must implement ``temperature``, ``temperature_unit``,
    ``humidity`` and ``condition``; all other properties are optional and
    default to None, in which case they are omitted from the state
    attributes.
    """

    @property
    def temperature(self):
        """Return the platform temperature."""
        raise NotImplementedError()

    @property
    def temperature_unit(self):
        """Return the unit of measurement for temperature."""
        raise NotImplementedError()

    @property
    def pressure(self):
        """Return the pressure."""
        return None

    @property
    def humidity(self):
        """Return the humidity."""
        raise NotImplementedError()

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return None

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return None

    @property
    def ozone(self):
        """Return the ozone level."""
        return None

    @property
    def attribution(self):
        """Return the attribution."""
        return None

    @property
    def visibility(self):
        """Return the visibility."""
        return None

    @property
    def forecast(self):
        """Return the forecast."""
        return None

    @property
    def precision(self):
        """Return the display precision for temperatures.

        Tenths for Celsius, whole degrees otherwise. (The previous
        docstring, "Return the forecast.", was a copy-paste error.)
        """
        return PRECISION_TENTHS if self.temperature_unit == TEMP_CELSIUS \
            else PRECISION_WHOLE

    @property
    def state_attributes(self):
        """Return the state attributes.

        Temperature is always present (converted for display); every other
        measurement is included only when the platform supplies it.
        """
        data = {
            ATTR_WEATHER_TEMPERATURE: show_temp(
                self.hass, self.temperature, self.temperature_unit,
                self.precision),
        }

        humidity = self.humidity
        if humidity is not None:
            data[ATTR_WEATHER_HUMIDITY] = round(humidity)

        ozone = self.ozone
        if ozone is not None:
            data[ATTR_WEATHER_OZONE] = ozone

        pressure = self.pressure
        if pressure is not None:
            data[ATTR_WEATHER_PRESSURE] = pressure

        wind_bearing = self.wind_bearing
        if wind_bearing is not None:
            data[ATTR_WEATHER_WIND_BEARING] = wind_bearing

        wind_speed = self.wind_speed
        if wind_speed is not None:
            data[ATTR_WEATHER_WIND_SPEED] = wind_speed

        visibility = self.visibility
        if visibility is not None:
            data[ATTR_WEATHER_VISIBILITY] = visibility

        attribution = self.attribution
        if attribution is not None:
            data[ATTR_WEATHER_ATTRIBUTION] = attribution

        if self.forecast is not None:
            forecast = []
            for forecast_entry in self.forecast:
                # Copy each entry so the platform's data is not mutated,
                # then convert the temperatures for display.
                forecast_entry = dict(forecast_entry)
                forecast_entry[ATTR_FORECAST_TEMP] = show_temp(
                    self.hass, forecast_entry[ATTR_FORECAST_TEMP],
                    self.temperature_unit, self.precision)
                if ATTR_FORECAST_TEMP_LOW in forecast_entry:
                    forecast_entry[ATTR_FORECAST_TEMP_LOW] = show_temp(
                        self.hass, forecast_entry[ATTR_FORECAST_TEMP_LOW],
                        self.temperature_unit, self.precision)
                forecast.append(forecast_entry)

            data[ATTR_FORECAST] = forecast

        return data

    @property
    def state(self):
        """Return the current state (the weather condition)."""
        return self.condition

    @property
    def condition(self):
        """Return the current condition."""
        raise NotImplementedError()
|
apache-2.0
|
ksmit799/Toontown-Source
|
toontown/coghq/PromotionManagerAI.py
|
1
|
3193
|
from otp.ai.AIBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
import random
from toontown.suit import SuitDNA
import CogDisguiseGlobals
from toontown.toonbase.ToontownBattleGlobals import getInvasionMultiplier
MeritMultiplier = 0.5
class PromotionManagerAI:
    """AI-side manager that awards cog-suit merits after battles."""
    notify = DirectNotifyGlobal.directNotify.newCategory('PromotionManagerAI')

    def __init__(self, air):
        self.air = air

    def getPercentChance(self):
        # Chance (percent) that a non-virtual defeated cog yields merits.
        return 100.0

    def recoverMerits(self, av, cogList, zoneId, multiplier=1, extraMerits=None):
        """Award merits to avatar ``av`` for the defeated cogs in ``cogList``.

        Only cogs the avatar actively fought count, and only for suit
        departments whose disguise is complete. Returns a 4-element list of
        merits recovered per department (before clamping each department to
        the avatar's maximum).
        """
        avId = av.getDoId()
        meritsRecovered = [0, 0, 0, 0]
        if extraMerits is None:
            extraMerits = [0, 0, 0, 0]
        if self.air.suitInvasionManager.getInvading():
            multiplier *= getInvasionMultiplier()
        for i in range(len(extraMerits)):
            if CogDisguiseGlobals.isSuitComplete(av.getCogParts(), i):
                meritsRecovered[i] += extraMerits[i]
                self.notify.debug('recoverMerits: extra merits = %s' % extraMerits[i])
        self.notify.debug('recoverMerits: multiplier = %s' % multiplier)
        for cogDict in cogList:
            dept = SuitDNA.suitDepts.index(cogDict['track'])
            if avId in cogDict['activeToons']:
                if CogDisguiseGlobals.isSuitComplete(av.getCogParts(), SuitDNA.suitDepts.index(cogDict['track'])):
                    self.notify.debug('recoverMerits: checking against cogDict: %s' % cogDict)
                    rand = random.random() * 100
                    if rand <= self.getPercentChance() and not cogDict['isVirtual']:
                        merits = cogDict['level'] * MeritMultiplier
                        merits = int(round(merits))
                        # Cogs that have revived once are worth double.
                        if cogDict['hasRevives']:
                            merits *= 2
                        merits = merits * multiplier
                        merits = int(round(merits))
                        meritsRecovered[dept] += merits
                        self.notify.debug('recoverMerits: merits = %s' % merits)
                    else:
                        self.notify.debug('recoverMerits: virtual cog!')
        if meritsRecovered != [0, 0, 0, 0]:
            actualCounted = [0, 0, 0, 0]
            merits = av.getCogMerits()
            for i in range(len(meritsRecovered)):
                # Fix: this local was previously named "max", shadowing the
                # builtin; renamed to maxMerits.
                maxMerits = CogDisguiseGlobals.getTotalMerits(av, i)
                if maxMerits:
                    if merits[i] + meritsRecovered[i] <= maxMerits:
                        actualCounted[i] = meritsRecovered[i]
                        merits[i] += meritsRecovered[i]
                    else:
                        # Clamp to the department maximum; only the portion
                        # actually credited is logged.
                        actualCounted[i] = maxMerits - merits[i]
                        merits[i] = maxMerits
            av.b_setCogMerits(merits)
            # sum() replaces reduce(lambda x, y: x + y, ...): same result,
            # and also works on Python 3 where reduce is not a builtin.
            if sum(actualCounted):
                self.air.writeServerEvent('merits', avId, '%s|%s|%s|%s' % tuple(actualCounted))
                self.notify.debug('recoverMerits: av %s recovered merits %s' % (avId, actualCounted))
        return meritsRecovered
|
mit
|
SlimRoms/android_external_chromium
|
net/tools/testserver/testserver.py
|
66
|
51380
|
#!/usr/bin/python2.4
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server used for testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
By default, it listens on an ephemeral port and sends the port number back to
the originating process over a pipe. The originating process can specify an
explicit port if necessary.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
"""
import asyncore
import base64
import BaseHTTPServer
import cgi
import errno
import optparse
import os
import re
import select
import simplejson
import SocketServer
import socket
import sys
import struct
import time
import urlparse
import warnings
# Ignore deprecation warnings, they make our output more cluttered.
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyftpdlib.ftpserver
import tlslite
import tlslite.api
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
if sys.platform == 'win32':
import msvcrt
SERVER_HTTP = 0
SERVER_FTP = 1
SERVER_SYNC = 2
# Using debug() seems to cause hangs on XP: see http://crbug.com/64515 .
debug_output = sys.stderr
def debug(msg):
    """Write a line to the shared debug stream and flush immediately.

    The parameter was renamed from "str", which shadowed the builtin; all
    callers pass it positionally.
    """
    debug_output.write(msg + "\n")
    debug_output.flush()
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
    """A specialization of BaseHTTPServer.HTTPServer that allows it
    to be exited cleanly (by setting its "stop" member to True)."""

    def serve_forever(self):
        # Serve one request at a time until a handler sets self.stop.
        # handle_request() blocks, so the flag is only checked between
        # requests.
        self.stop = False
        self.nonce_time = None  # presumably used by digest auth handling -- confirm
        while not self.stop:
            self.handle_request()
        self.socket.close()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn, StoppableHTTPServer):
    """A specialization of StoppableHTTPServer that adds https support."""

    def __init__(self, server_address, request_hander_class, cert_path,
                 ssl_client_auth, ssl_client_cas, ssl_bulk_ciphers):
        # The pem file at cert_path must contain both the certificate and
        # the private key; it is read once for each.
        s = open(cert_path).read()
        x509 = tlslite.api.X509()
        x509.parse(s)
        self.cert_chain = tlslite.api.X509CertChain([x509])
        s = open(cert_path).read()
        self.private_key = tlslite.api.parsePEMKey(s, private=True)
        self.ssl_client_auth = ssl_client_auth
        self.ssl_client_cas = []
        # Collect the subject of each acceptable client CA; these are sent
        # to the client during the handshake when client auth is requested.
        for ca_file in ssl_client_cas:
            s = open(ca_file).read()
            x509 = tlslite.api.X509()
            x509.parse(s)
            self.ssl_client_cas.append(x509.subject)
        self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
        if ssl_bulk_ciphers is not None:
            self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers
        self.session_cache = tlslite.api.SessionCache()
        StoppableHTTPServer.__init__(self, server_address, request_hander_class)

    def handshake(self, tlsConnection):
        """Creates the SSL connection. Returns True on success."""
        try:
            tlsConnection.handshakeServer(certChain=self.cert_chain,
                                          privateKey=self.private_key,
                                          sessionCache=self.session_cache,
                                          reqCert=self.ssl_client_auth,
                                          settings=self.ssl_handshake_settings,
                                          reqCAs=self.ssl_client_cas)
            tlsConnection.ignoreAbruptClose = True
            return True
        except tlslite.api.TLSAbruptCloseError:
            # Ignore abrupt close: treated as a successful (if short) session.
            return True
        except tlslite.api.TLSError, error:  # Python-2-only except syntax
            print "Handshake failure:", str(error)
            return False
class SyncHTTPServer(StoppableHTTPServer):
    """An HTTP server that handles sync commands and runs an embedded
    XMPP server on the same event loop."""

    def __init__(self, server_address, request_handler_class):
        # We import here to avoid pulling in chromiumsync's dependencies
        # unless strictly necessary.
        import chromiumsync
        import xmppserver
        StoppableHTTPServer.__init__(self, server_address, request_handler_class)
        self._sync_handler = chromiumsync.TestServer()
        self._xmpp_socket_map = {}
        # Bind the XMPP server to an ephemeral localhost port and record
        # which port the OS picked so tests can connect to it.
        self._xmpp_server = xmppserver.XmppServer(
            self._xmpp_socket_map, ('localhost', 0))
        self.xmpp_port = self._xmpp_server.getsockname()[1]

    def HandleCommand(self, query, raw_request):
        # Delegate sync-protocol commands to the chromiumsync test server.
        return self._sync_handler.HandleCommand(query, raw_request)

    def HandleRequestNoBlock(self):
        """Handles a single request.

        Copied from SocketServer._handle_request_noblock().
        """
        try:
            request, client_address = self.get_request()
        except socket.error:
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                # Broad catch mirrors SocketServer: report and close.
                self.handle_error(request, client_address)
                self.close_request(request)

    def serve_forever(self):
        """This is a merge of asyncore.loop() and SocketServer.serve_forever().

        One select() loop multiplexes the HTTP listening socket with all of
        the XMPP server's sockets.
        """

        def HandleXmppSocket(fd, socket_map, handler):
            """Runs the handler for the xmpp connection for fd.

            Adapted from asyncore.read() et al.
            """
            xmpp_connection = socket_map.get(fd)
            # This could happen if a previous handler call caused fd to get
            # removed from socket_map.
            if xmpp_connection is None:
                return
            try:
                handler(xmpp_connection)
            except (asyncore.ExitNow, KeyboardInterrupt, SystemExit):
                raise
            except:
                # asyncore convention: let the dispatcher report the error.
                xmpp_connection.handle_error()

        while True:
            read_fds = [ self.fileno() ]
            write_fds = []
            exceptional_fds = []

            # Build the fd lists for select() from the xmpp dispatchers.
            for fd, xmpp_connection in self._xmpp_socket_map.items():
                is_r = xmpp_connection.readable()
                is_w = xmpp_connection.writable()
                if is_r:
                    read_fds.append(fd)
                if is_w:
                    write_fds.append(fd)
                if is_r or is_w:
                    exceptional_fds.append(fd)

            try:
                read_fds, write_fds, exceptional_fds = (
                    select.select(read_fds, write_fds, exceptional_fds))
            except select.error, err:  # Python-2-only except syntax
                if err.args[0] != errno.EINTR:
                    raise
                else:
                    # Interrupted system call: just retry the select.
                    continue

            for fd in read_fds:
                if fd == self.fileno():
                    # Activity on our own listening socket: serve one HTTP
                    # request without blocking the xmpp sockets.
                    self.HandleRequestNoBlock()
                    continue
                HandleXmppSocket(fd, self._xmpp_socket_map,
                                 asyncore.dispatcher.handle_read_event)

            for fd in write_fds:
                HandleXmppSocket(fd, self._xmpp_socket_map,
                                 asyncore.dispatcher.handle_write_event)

            for fd in exceptional_fds:
                HandleXmppSocket(fd, self._xmpp_socket_map,
                                 asyncore.dispatcher.handle_expt_event)
class BasePageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler that dispatches each HTTP method to the first
    handler in the corresponding list that claims the request."""

    def __init__(self, request, client_address, socket_server,
                 connect_handlers, get_handlers, post_handlers, put_handlers):
        # Handler lists are tried in order; each handler returns a truthy
        # value once it has fully served the request.
        self._connect_handlers = connect_handlers
        self._get_handlers = get_handlers
        self._post_handlers = post_handlers
        self._put_handlers = put_handlers
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(
            self, request, client_address, socket_server)

    def log_request(self, *args, **kwargs):
        # Disable request logging to declutter test log output.
        pass

    def _ShouldHandleRequest(self, handler_name):
        """Determines if the path can be handled by the handler.

        We consider a handler valid if the path begins with the
        handler name. It can optionally be followed by "?*", "/*".
        """
        pattern = re.compile('%s($|\?|/).*' % handler_name)
        return pattern.match(self.path)

    def do_CONNECT(self):
        for handler in self._connect_handlers:
            if handler():
                return

    def do_GET(self):
        for handler in self._get_handlers:
            if handler():
                return

    def do_POST(self):
        for handler in self._post_handlers:
            if handler():
                return

    def do_PUT(self):
        for handler in self._put_handlers:
            if handler():
                return
class TestPageHandler(BasePageHandler):
def __init__(self, request, client_address, socket_server):
    """Registers the ordered handler lists for each supported HTTP method."""
    connect_handlers = [
        self.RedirectConnectHandler,
        self.ServerAuthConnectHandler,
        self.DefaultConnectResponseHandler]
    # GET handlers are tried in order; DefaultResponseHandler is the
    # catch-all and must stay last.
    get_handlers = [
        self.NoCacheMaxAgeTimeHandler,
        self.NoCacheTimeHandler,
        self.CacheTimeHandler,
        self.CacheExpiresHandler,
        self.CacheProxyRevalidateHandler,
        self.CachePrivateHandler,
        self.CachePublicHandler,
        self.CacheSMaxAgeHandler,
        self.CacheMustRevalidateHandler,
        self.CacheMustRevalidateMaxAgeHandler,
        self.CacheNoStoreHandler,
        self.CacheNoStoreMaxAgeHandler,
        self.CacheNoTransformHandler,
        self.DownloadHandler,
        self.DownloadFinishHandler,
        self.EchoHeader,
        self.EchoHeaderCache,
        self.EchoAllHandler,
        self.FileHandler,
        self.SetCookieHandler,
        self.AuthBasicHandler,
        self.AuthDigestHandler,
        self.SlowServerHandler,
        self.ContentTypeHandler,
        self.NoContentHandler,
        self.ServerRedirectHandler,
        self.ClientRedirectHandler,
        self.MultipartHandler,
        self.DefaultResponseHandler]
    # POST/PUT reuse the GET handlers after their method-specific ones.
    post_handlers = [
        self.EchoTitleHandler,
        self.EchoAllHandler,
        self.EchoHandler,
        self.DeviceManagementHandler] + get_handlers
    put_handlers = [
        self.EchoTitleHandler,
        self.EchoAllHandler,
        self.EchoHandler] + get_handlers

    # Extension -> MIME type table used by GetMIMETypeFromName; anything
    # not listed falls back to text/html.
    self._mime_types = {
        'crx' : 'application/x-chrome-extension',
        'exe' : 'application/octet-stream',
        'gif': 'image/gif',
        'jpeg' : 'image/jpeg',
        'jpg' : 'image/jpeg',
        'pdf' : 'application/pdf',
        'xml' : 'text/xml'
    }
    self._default_mime_type = 'text/html'

    BasePageHandler.__init__(self, request, client_address, socket_server,
                             connect_handlers, get_handlers, post_handlers,
                             put_handlers)
def GetMIMETypeFromName(self, file_name):
    """Return the MIME type for file_name, based only on its extension."""
    # Strip any query string before looking at the extension.
    path_only = file_name.split("?")[0]
    extension = os.path.splitext(path_only)[1]
    if not extension:
        # No extension: fall back to the default type.
        return self._default_mime_type
    # splitext keeps the leading dot; drop it for the table lookup.
    return self._mime_types.get(extension[1:], self._default_mime_type)
def NoCacheMaxAgeTimeHandler(self):
    """Serves a page titled with the current time; Cache-Control: max-age=0
    forbids reuse of a cached copy without revalidation."""
    if not self._ShouldHandleRequest("/nocachetime/maxage"):
        return False

    self.send_response(200)
    self.send_header('Cache-Control', 'max-age=0')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    # The ever-changing title lets tests detect cached vs. fresh responses.
    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def NoCacheTimeHandler(self):
    """Serves a page titled with the current time with
    Cache-Control: no-cache."""
    if not self._ShouldHandleRequest("/nocachetime"):
        return False

    self.send_response(200)
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheTimeHandler(self):
    """Serves a page titled with the current time, cacheable for one minute
    (Cache-Control: max-age=60)."""
    if not self._ShouldHandleRequest("/cachetime"):
        return False

    self.send_response(200)
    self.send_header('Cache-Control', 'max-age=60')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheExpiresHandler(self):
    """Serves a page titled with the current time with an Expires header
    set to 1 Jan 2099 (i.e. cacheable essentially forever)."""
    if not self._ShouldHandleRequest("/cache/expires"):
        return False

    self.send_response(200)
    self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheProxyRevalidateHandler(self):
    """Serves a page titled with the current time, cacheable for 60 seconds
    but requiring proxies to revalidate (max-age=60, proxy-revalidate)."""
    if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CachePrivateHandler(self):
    """Serves a page titled with the current time, cacheable by private
    (browser) caches only, for 3 seconds (max-age=3, private).

    Note: the old docstring said 5 seconds; the header actually sent is
    max-age=3.
    """
    if not self._ShouldHandleRequest("/cache/private"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, private')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CachePublicHandler(self):
    """Serves a page titled with the current time, cacheable by shared
    caches for 3 seconds (max-age=3, public).

    Note: the old docstring said 5 seconds; the header actually sent is
    max-age=3.
    """
    if not self._ShouldHandleRequest("/cache/public"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=3, public')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheSMaxAgeHandler(self):
    """Serves a page titled with the current time that shared caches may
    keep for 60 seconds while private caches may not reuse it at all
    (public, s-maxage=60, max-age=0)."""
    if not self._ShouldHandleRequest("/cache/s-maxage"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheMustRevalidateHandler(self):
    """Serves a page titled with the current time that caches must
    revalidate before reuse (Cache-Control: must-revalidate)."""
    if not self._ShouldHandleRequest("/cache/must-revalidate"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'must-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheMustRevalidateMaxAgeHandler(self):
    """Serves a page titled with the current time, cacheable for 60 seconds
    but requiring revalidation once stale (max-age=60, must-revalidate)."""
    if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, must-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheNoStoreHandler(self):
    """Serves a page titled with the current time that caches must not
    store at all (Cache-Control: no-store)."""
    if not self._ShouldHandleRequest("/cache/no-store"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'no-store')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheNoStoreMaxAgeHandler(self):
    """Serves a page titled with the current time that must not be stored
    even though a max-age of 60 seconds is also specified
    (max-age=60, no-store) -- no-store wins."""
    if not self._ShouldHandleRequest("/cache/no-store/max-age"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, no-store')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CacheNoTransformHandler(self):
    """Serves a page titled with the current time that intermediaries must
    not transform while caching (Cache-Control: no-transform)."""
    if not self._ShouldHandleRequest("/cache/no-transform"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'no-transform')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def EchoHeader(self):
    """Echoes back the value of a specific request header, uncached."""
    return self.EchoHeaderHelper("/echoheader")

def EchoHeaderCache(self):
    """Echoes back the value of a specific request header while allowing
    caching for ~16 hours (max-age=60000).

    Fix: this description previously floated between the two methods as two
    bare string expressions (dead code); it is now a proper docstring.
    """
    return self.EchoHeaderHelper("/echoheadercache")
def EchoHeaderHelper(self, echo_header):
    """Echoes back the value of the request header named in the query string.

    Args:
        echo_header: the URL prefix being handled ("/echoheader" or
            "/echoheadercache"); the latter allows long-lived caching.
    """
    if not self._ShouldHandleRequest(echo_header):
        return False

    # Fix: header_name was previously left unbound when the URL had no
    # query string (e.g. a bare "/echoheader"), raising UnboundLocalError
    # at the len(header_name) checks below.
    header_name = ''
    query_char = self.path.find('?')
    if query_char != -1:
        header_name = self.path[query_char+1:]

    self.send_response(200)
    self.send_header('Content-type', 'text/plain')
    if echo_header == '/echoheadercache':
        self.send_header('Cache-control', 'max-age=60000')
    else:
        self.send_header('Cache-control', 'no-cache')
    # Insert a Vary header to properly indicate that the cachability of this
    # request is subject to value of the request header being echoed.
    if len(header_name) > 0:
        self.send_header('Vary', header_name)
    self.end_headers()

    if len(header_name) > 0:
        self.wfile.write(self.headers.getheader(header_name))

    return True
def ReadRequestBody(self):
    """This function reads the body of the current HTTP request, handling
    both plain and chunked transfer encoded requests.

    Returns the complete body as a single string.
    """
    if self.headers.getheader('transfer-encoding') != 'chunked':
        length = int(self.headers.getheader('content-length'))
        return self.rfile.read(length)

    # Read the request body as chunks: each chunk is a hex length line,
    # the payload bytes, then a trailing CRLF.
    body = ""
    while True:
        line = self.rfile.readline()
        length = int(line, 16)
        if length == 0:
            # A zero-length chunk terminates the body; consume the final
            # blank line and stop.
            self.rfile.readline()
            break
        body += self.rfile.read(length)
        self.rfile.read(2)  # skip the CRLF following each chunk
    return body
def EchoHandler(self):
    """This handler just echoes back the payload of the request, for testing
    form submission."""
    if not self._ShouldHandleRequest("/echo"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    # The raw request body becomes the response body, unmodified.
    self.wfile.write(self.ReadRequestBody())
    return True
def EchoTitleHandler(self):
    """This handler is like Echo, but sets the page title to the request
    body so tests can read it back from the rendered page."""
    if not self._ShouldHandleRequest("/echotitle"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    request = self.ReadRequestBody()
    self.wfile.write('<html><head><title>')
    self.wfile.write(request)
    self.wfile.write('</title></head></html>')
    return True
def EchoAllHandler(self):
    """This handler yields a (more) human-readable page listing information
    about the request headers & contents.

    For POST/PUT requests the body is parsed as form data and each field is
    listed; the raw request headers are always shown.
    """
    if not self._ShouldHandleRequest("/echoall"):
        return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head><style>'
                     'pre { border: 1px solid black; margin: 5px; padding: 5px }'
                     '</style></head><body>'
                     '<div style="float: right">'
                     '<a href="/echo">back to referring page</a></div>'
                     '<h1>Request Body:</h1><pre>')

    if self.command == 'POST' or self.command == 'PUT':
        qs = self.ReadRequestBody()
        # Fix: use urlparse.parse_qs (module already imported at the top of
        # this file) instead of the deprecated cgi.parse_qs, which was
        # removed from the cgi module in Python 3.8; behavior is identical.
        params = urlparse.parse_qs(qs, keep_blank_values=1)

        for param in params:
            self.wfile.write('%s=%s\n' % (param, params[param][0]))

    self.wfile.write('</pre>')

    self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)

    self.wfile.write('</body></html>')
    return True
def DownloadHandler(self):
    """This handler sends a downloadable file with or without reporting
    the size (45K total), pausing mid-stream until /download-finish is hit."""
    if self.path.startswith("/download-unknown-size"):
        send_length = False
    elif self.path.startswith("/download-known-size"):
        send_length = True
    else:
        return False

    #
    # The test which uses this functionality is attempting to send
    # small chunks of data to the client.  Use a fairly large buffer
    # so that we'll fill chrome's IO buffer enough to force it to
    # actually write the data.
    # See also the comments in the client-side of this test in
    # download_uitest.cc
    #
    size_chunk1 = 35*1024
    size_chunk2 = 10*1024

    self.send_response(200)
    self.send_header('Content-type', 'application/octet-stream')
    self.send_header('Cache-Control', 'max-age=0')
    if send_length:
        self.send_header('Content-Length', size_chunk1 + size_chunk2)
    self.end_headers()

    # First chunk of data:
    self.wfile.write("*" * size_chunk1)
    self.wfile.flush()

    # handle requests until one of them clears this flag
    # (DownloadFinishHandler, triggered by a request to /download-finish).
    self.server.waitForDownload = True
    while self.server.waitForDownload:
        self.server.handle_request()

    # Second chunk of data:
    self.wfile.write("*" * size_chunk2)
    return True
def DownloadFinishHandler(self):
    """This handler just tells the server to finish the current download,
    releasing the wait loop inside DownloadHandler."""
    if not self._ShouldHandleRequest("/download-finish"):
        return False

    self.server.waitForDownload = False
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=0')
    self.end_headers()
    return True
def _ReplaceFileData(self, data, query_parameters):
"""Replaces matching substrings in a file.
If the 'replace_text' URL query parameter is present, it is expected to be
of the form old_text:new_text, which indicates that any old_text strings in
the file are replaced with new_text. Multiple 'replace_text' parameters may
be specified.
If the parameters are not present, |data| is returned.
"""
query_dict = cgi.parse_qs(query_parameters)
replace_text_values = query_dict.get('replace_text', [])
for replace_text_value in replace_text_values:
replace_text_args = replace_text_value.split(':')
if len(replace_text_args) != 2:
raise ValueError(
'replace_text must be of form old_text:new_text. Actual value: %s' %
replace_text_value)
old_text_b64, new_text_b64 = replace_text_args
old_text = base64.urlsafe_b64decode(old_text_b64)
new_text = base64.urlsafe_b64decode(new_text_b64)
data = data.replace(old_text, new_text)
return data
def FileHandler(self):
"""This handler sends the contents of the requested file. Wow, it's like
a real webserver!"""
prefix = self.server.file_root_url
if not self.path.startswith(prefix):
return False
# Consume a request body if present.
if self.command == 'POST' or self.command == 'PUT' :
self.ReadRequestBody()
_, _, url_path, _, query, _ = urlparse.urlparse(self.path)
sub_path = url_path[len(prefix):]
entries = sub_path.split('/')
file_path = os.path.join(self.server.data_dir, *entries)
if os.path.isdir(file_path):
file_path = os.path.join(file_path, 'index.html')
if not os.path.isfile(file_path):
print "File not found " + sub_path + " full path:" + file_path
self.send_error(404)
return True
f = open(file_path, "rb")
data = f.read()
f.close()
data = self._ReplaceFileData(data, query)
# If file.mock-http-headers exists, it contains the headers we
# should send. Read them in and parse them.
headers_path = file_path + '.mock-http-headers'
if os.path.isfile(headers_path):
f = open(headers_path, "r")
# "HTTP/1.1 200 OK"
response = f.readline()
status_code = re.findall('HTTP/\d+.\d+ (\d+)', response)[0]
self.send_response(int(status_code))
for line in f:
header_values = re.findall('(\S+):\s*(.*)', line)
if len(header_values) > 0:
# "name: value"
name, value = header_values[0]
self.send_header(name, value)
f.close()
else:
# Could be more generic once we support mime-type sniffing, but for
# now we need to set it explicitly.
range = self.headers.get('Range')
if range and range.startswith('bytes='):
# Note this doesn't handle all valid byte range values (i.e. open ended
# ones), just enough for what we needed so far.
range = range[6:].split('-')
start = int(range[0])
end = int(range[1])
self.send_response(206)
content_range = 'bytes ' + str(start) + '-' + str(end) + '/' + \
str(len(data))
self.send_header('Content-Range', content_range)
data = data[start: end + 1]
else:
self.send_response(200)
self.send_header('Content-type', self.GetMIMETypeFromName(file_path))
self.send_header('Accept-Ranges', 'bytes')
self.send_header('Content-Length', len(data))
self.send_header('ETag', '\'' + file_path + '\'')
self.end_headers()
self.wfile.write(data)
return True
def SetCookieHandler(self):
    """This handler just sets a cookie, for testing cookie handling."""
    if not self._ShouldHandleRequest("/set-cookie"):
        return False

    # Everything after '?' is a '&'-separated list of raw Set-Cookie values.
    query_char = self.path.find('?')
    if query_char != -1:
        cookie_values = self.path[query_char + 1:].split('&')
    else:
        cookie_values = ("",)
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    for cookie_value in cookie_values:
        self.send_header('Set-Cookie', '%s' % cookie_value)
    self.end_headers()
    # Echo the cookies in the body so the test can verify what was set.
    for cookie_value in cookie_values:
        self.wfile.write('%s' % cookie_value)
    return True
def AuthBasicHandler(self):
    """This handler tests 'Basic' authentication. It just sends a page with
    title 'user/pass' if you succeed."""
    if not self._ShouldHandleRequest("/auth-basic"):
        return False

    username = userpass = password = b64str = ""

    expected_password = 'secret'
    realm = 'testrealm'
    set_cookie_if_challenged = False

    # Optional query parameters override the defaults above.
    _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
    query_params = cgi.parse_qs(query, True)
    if 'set-cookie-if-challenged' in query_params:
        set_cookie_if_challenged = True
    if 'password' in query_params:
        expected_password = query_params['password'][0]
    if 'realm' in query_params:
        realm = query_params['realm'][0]

    auth = self.headers.getheader('authorization')
    try:
        if not auth:
            raise Exception('no auth')
        # 'Authorization: Basic <base64(user:pass)>'
        b64str = re.findall(r'Basic (\S+)', auth)[0]
        userpass = base64.b64decode(b64str)
        username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
        if password != expected_password:
            raise Exception('wrong password')
    except Exception, e:
        # Authentication failed.
        self.send_response(401)
        self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
        self.send_header('Content-type', 'text/html')
        if set_cookie_if_challenged:
            self.send_header('Set-Cookie', 'got_challenged=true')
        self.end_headers()
        # Dump the parsed state to ease debugging of failed test runs.
        self.wfile.write('<html><head>')
        self.wfile.write('<title>Denied: %s</title>' % e)
        self.wfile.write('</head><body>')
        self.wfile.write('auth=%s<p>' % auth)
        self.wfile.write('b64str=%s<p>' % b64str)
        self.wfile.write('username: %s<p>' % username)
        self.wfile.write('userpass: %s<p>' % userpass)
        self.wfile.write('password: %s<p>' % password)
        self.wfile.write('You sent:<br>%s<p>' % self.headers)
        self.wfile.write('</body></html>')
        return True

    # Authentication successful. (Return a cachable response to allow for
    # testing cached pages that require authentication.)
    if_none_match = self.headers.getheader('if-none-match')
    if if_none_match == "abc":
        # Client already has the cached copy.
        self.send_response(304)
        self.end_headers()
    elif url_path.endswith(".gif"):
        # Using chrome/test/data/google/logo.gif as the test image
        test_image_path = ['google', 'logo.gif']
        gif_path = os.path.join(self.server.data_dir, *test_image_path)
        if not os.path.isfile(gif_path):
            self.send_error(404)
            return True

        f = open(gif_path, "rb")
        data = f.read()
        f.close()

        self.send_response(200)
        self.send_header('Content-type', 'image/gif')
        self.send_header('Cache-control', 'max-age=60000')
        self.send_header('Etag', 'abc')
        self.end_headers()
        self.wfile.write(data)
    else:
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.send_header('Cache-control', 'max-age=60000')
        self.send_header('Etag', 'abc')
        self.end_headers()
        self.wfile.write('<html><head>')
        self.wfile.write('<title>%s/%s</title>' % (username, password))
        self.wfile.write('</head><body>')
        self.wfile.write('auth=%s<p>' % auth)
        self.wfile.write('You sent:<br>%s<p>' % self.headers)
        self.wfile.write('</body></html>')
    return True
def GetNonce(self, force_reset=False):
    """Returns a nonce that's stable per request path for the server's lifetime.

    This is a fake implementation. A real implementation would only use a given
    nonce a single time (hence the name n-once). However, for the purposes of
    unittesting, we don't care about the security of the nonce.

    Args:
      force_reset: Iff set, the nonce will be changed. Useful for testing the
                   "stale" response.
    """
    # nonce_time is lazily initialized on first use and bumped on reset so
    # the digest handler can deliberately invalidate outstanding nonces.
    if force_reset or not self.server.nonce_time:
        self.server.nonce_time = time.time()
    return _new_md5('privatekey%s%d' %
                    (self.path, self.server.nonce_time)).hexdigest()
def AuthDigestHandler(self):
    """This handler tests 'Digest' authentication.

    It just sends a page with title 'user/pass' if you succeed.

    A stale response is sent iff "stale" is present in the request path.
    """
    if not self._ShouldHandleRequest("/auth-digest"):
        return False

    stale = 'stale' in self.path
    nonce = self.GetNonce(force_reset=stale)
    opaque = _new_md5('opaque').hexdigest()
    password = 'secret'
    realm = 'testrealm'

    auth = self.headers.getheader('authorization')
    pairs = {}
    try:
        if not auth:
            raise Exception('no auth')
        if not auth.startswith('Digest'):
            raise Exception('not digest')
        # Pull out all the name="value" pairs as a dictionary.
        pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))

        # Make sure it's all valid.
        if pairs['nonce'] != nonce:
            raise Exception('wrong nonce')
        if pairs['opaque'] != opaque:
            raise Exception('wrong opaque')

        # Check the 'response' value and make sure it matches our magic hash.
        # See http://www.ietf.org/rfc/rfc2617.txt
        hash_a1 = _new_md5(
            ':'.join([pairs['username'], realm, password])).hexdigest()
        hash_a2 = _new_md5(':'.join([self.command, pairs['uri']])).hexdigest()
        if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
            # qop-style digest (RFC 2617 section 3.2.2.1).
            response = _new_md5(':'.join([hash_a1, nonce, pairs['nc'],
                                          pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
        else:
            response = _new_md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()

        if pairs['response'] != response:
            raise Exception('wrong password')
    except Exception, e:
        # Authentication failed.
        self.send_response(401)
        hdr = ('Digest '
               'realm="%s", '
               'domain="/", '
               'qop="auth", '
               'algorithm=MD5, '
               'nonce="%s", '
               'opaque="%s"') % (realm, nonce, opaque)
        if stale:
            hdr += ', stale="TRUE"'
        self.send_header('WWW-Authenticate', hdr)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Dump the parsed state to ease debugging of failed test runs.
        self.wfile.write('<html><head>')
        self.wfile.write('<title>Denied: %s</title>' % e)
        self.wfile.write('</head><body>')
        self.wfile.write('auth=%s<p>' % auth)
        self.wfile.write('pairs=%s<p>' % pairs)
        self.wfile.write('You sent:<br>%s<p>' % self.headers)
        self.wfile.write('We are replying:<br>%s<p>' % hdr)
        self.wfile.write('</body></html>')
        return True

    # Authentication successful.
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('</body></html>')
    return True
def SlowServerHandler(self):
    """Wait for the user suggested time before responding. The syntax is
    /slow?0.5 to wait for half a second."""
    if not self._ShouldHandleRequest("/slow"):
        return False
    query_char = self.path.find('?')
    wait_sec = 1.0
    if query_char >= 0:
        try:
            # float() (not int()) so fractional waits like '/slow?0.5' work
            # as the docstring promises; unparsable values keep the 1s default.
            wait_sec = float(self.path[query_char + 1:])
        except ValueError:
            pass
    time.sleep(wait_sec)
    self.send_response(200)
    self.send_header('Content-type', 'text/plain')
    self.end_headers()
    self.wfile.write("waited %d seconds" % wait_sec)
    return True
def ContentTypeHandler(self):
    """Returns a string of html with the given content type. E.g.,
    /contenttype?text/css returns an html file with the Content-Type
    header set to text/css."""
    if not self._ShouldHandleRequest("/contenttype"):
        return False
    # NOTE(review): if no '?' is present, find() returns -1 and the slice
    # below yields the whole path, so the Content-Type would literally be
    # '/contenttype' -- confirm callers always pass a query.
    query_char = self.path.find('?')
    content_type = self.path[query_char + 1:].strip()
    if not content_type:
        content_type = 'text/html'
    self.send_response(200)
    self.send_header('Content-Type', content_type)
    self.end_headers()
    self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n");
    return True
def NoContentHandler(self):
    """Returns a 204 No Content response."""
    if not self._ShouldHandleRequest("/nocontent"):
        return False
    # 204 deliberately carries no body and no Content-Type header.
    self.send_response(204)
    self.end_headers()
    return True
def ServerRedirectHandler(self):
    """Sends a server redirect to the given URL. The syntax is
    '/server-redirect?http://foo.bar/asdf' to redirect to
    'http://foo.bar/asdf'"""
    test_name = "/server-redirect"
    if not self._ShouldHandleRequest(test_name):
        return False

    # Without a destination, reply with a usage page instead.
    query_char = self.path.find('?')
    if query_char < 0 or len(self.path) <= query_char + 1:
        self.sendRedirectHelp(test_name)
        return True
    dest = self.path[query_char + 1:]

    self.send_response(301)  # moved permanently
    self.send_header('Location', dest)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest)

    return True
def ClientRedirectHandler(self):
    """Sends a client redirect to the given URL. The syntax is
    '/client-redirect?http://foo.bar/asdf' to redirect to
    'http://foo.bar/asdf'"""
    test_name = "/client-redirect"
    if not self._ShouldHandleRequest(test_name):
        return False

    # A missing or empty '?<url>' part gets the usage page instead.
    query_start = self.path.find('?')
    if query_start < 0 or query_start + 1 >= len(self.path):
        self.sendRedirectHelp(test_name)
        return True
    destination = self.path[query_start + 1:]

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    # The redirect happens client-side via a meta refresh tag.
    body = ('<html><head>'
            '<meta http-equiv="refresh" content="0;url=%s">'
            '</head><body>Redirecting to %s</body></html>'
            % (destination, destination))
    self.wfile.write(body)
    return True
def MultipartHandler(self):
    """Send a multipart response (10 text/html pages)."""
    test_name = "/multipart"
    if not self._ShouldHandleRequest(test_name):
        return False

    num_frames = 10
    bound = '12345'
    self.send_response(200)
    self.send_header('Content-type',
                     'multipart/x-mixed-replace;boundary=' + bound)
    self.end_headers()

    # Each part replaces the previous one in the browser (server push).
    for i in xrange(num_frames):
        self.wfile.write('--' + bound + '\r\n')
        self.wfile.write('Content-type: text/html\r\n\r\n')
        self.wfile.write('<title>page ' + str(i) + '</title>')
        self.wfile.write('page ' + str(i))

    # Closing boundary terminates the multipart stream.
    self.wfile.write('--' + bound + '--')
    return True
def DefaultResponseHandler(self):
    """This is the catch-all response handler for requests that aren't handled
    by one of the special handlers above.

    Note that we specify the content-length as without it the https connection
    is not closed properly (and the browser keeps expecting data)."""
    contents = "Default response given for path: " + self.path
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header("Content-Length", len(contents))
    self.end_headers()
    self.wfile.write(contents)
    return True
def RedirectConnectHandler(self):
    """Sends a redirect to the CONNECT request for www.redirect.com. This
    response is not specified by the RFC, so the browser should not follow
    the redirect."""
    if (self.path.find("www.redirect.com") < 0):
        return False

    dest = "http://www.destination.com/foo.js"

    self.send_response(302)  # moved temporarily
    self.send_header('Location', dest)
    self.send_header('Connection', 'close')
    self.end_headers()
    return True
def ServerAuthConnectHandler(self):
    """Sends a 401 to the CONNECT request for www.server-auth.com. This
    response doesn't make sense because the proxy server cannot request
    server authentication."""
    if (self.path.find("www.server-auth.com") < 0):
        return False

    challenge = 'Basic realm="WallyWorld"'

    self.send_response(401)  # unauthorized
    self.send_header('WWW-Authenticate', challenge)
    self.send_header('Connection', 'close')
    self.end_headers()
    return True
def DefaultConnectResponseHandler(self):
    """This is the catch-all response handler for CONNECT requests that aren't
    handled by one of the special handlers above.  Real Web servers respond
    with 400 to CONNECT requests."""
    contents = "Your client has issued a malformed or illegal request."
    self.send_response(400)  # bad request
    self.send_header('Content-type', 'text/html')
    self.send_header("Content-Length", len(contents))
    self.end_headers()
    self.wfile.write(contents)
    return True
def DeviceManagementHandler(self):
    """Delegates to the device management service used for cloud policy."""
    if not self._ShouldHandleRequest("/device_management"):
        return False

    raw_request = self.ReadRequestBody()

    # The device-management TestServer is created lazily on first request;
    # importing here keeps the dependency optional for runs that never hit it.
    if not self.server._device_management_handler:
        import device_management
        policy_path = os.path.join(self.server.data_dir, 'device_management')
        self.server._device_management_handler = (
            device_management.TestServer(policy_path,
                                         self.server.policy_keys,
                                         self.server.policy_user))

    http_response, raw_reply = (
        self.server._device_management_handler.HandleRequest(self.path,
                                                             self.headers,
                                                             raw_request))
    self.send_response(http_response)
    self.end_headers()
    self.wfile.write(raw_reply)
    return True
# called by the redirect handling function when there is no parameter
def sendRedirectHelp(self, redirect_name):
    """Send a small usage page explaining the '<handler>?<url>' syntax."""
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><body><h1>Error: no redirect destination</h1>')
    self.wfile.write('Use <pre>%s?http://dest...</pre>' % redirect_name)
    self.wfile.write('</body></html>')
class SyncPageHandler(BasePageHandler):
    """Handler for the main HTTP sync server."""

    def __init__(self, request, client_address, sync_http_server):
        # Only /chromiumsync/time (GET) and /chromiumsync/command (POST) are
        # served; everything else falls through to the base class behavior.
        get_handlers = [self.ChromiumSyncTimeHandler]
        post_handlers = [self.ChromiumSyncCommandHandler]
        BasePageHandler.__init__(self, request, client_address,
                                 sync_http_server, [], get_handlers,
                                 post_handlers, [])

    def ChromiumSyncTimeHandler(self):
        """Handle Chromium sync .../time requests.

        The syncer sometimes checks server reachability by examining /time.
        """
        test_name = "/chromiumsync/time"
        if not self._ShouldHandleRequest(test_name):
            return False

        # An empty 200 is sufficient for the reachability check.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        return True

    def ChromiumSyncCommandHandler(self):
        """Handle a chromiumsync command arriving via http.

        This covers all sync protocol commands: authentication, getupdates, and
        commit.
        """
        test_name = "/chromiumsync/command"
        if not self._ShouldHandleRequest(test_name):
            return False

        length = int(self.headers.getheader('content-length'))
        raw_request = self.rfile.read(length)

        # The actual sync protocol implementation lives on the server object.
        http_response, raw_reply = self.server.HandleCommand(
            self.path, raw_request)
        self.send_response(http_response)
        self.end_headers()
        self.wfile.write(raw_reply)
        return True
def MakeDataDir():
    """Return the directory to serve test files from, or None on error.

    Uses the --data-dir flag when given; otherwise falls back to the default
    ../../../../test/data path relative to this script. Relies on the
    module-global 'options' populated in the __main__ block.
    """
    if options.data_dir:
        if not os.path.isdir(options.data_dir):
            print 'specified data dir not found: ' + options.data_dir + ' exiting...'
            return None
        my_data_dir = options.data_dir
    else:
        # Create the default path to our data dir, relative to the exe dir.
        my_data_dir = os.path.dirname(sys.argv[0])
        my_data_dir = os.path.join(my_data_dir, "..", "..", "..", "..",
                                   "test", "data")

        #TODO(ibrar): Must use Find* funtion defined in google\tools
        #i.e my_data_dir = FindUpward(my_data_dir, "test", "data")

    return my_data_dir
class FileMultiplexer:
    """A minimal write-only stream that fans every write out to two
    underlying streams (used to tee console output into a log file)."""

    def __init__(self, fd1, fd2):
        self.__fd1 = fd1
        self.__fd2 = fd2

    def __del__(self):
        # Close both sinks, but never the process-wide stdout/stderr,
        # which other code may still be using.
        for stream in (self.__fd1, self.__fd2):
            if stream not in (sys.stdout, sys.stderr):
                stream.close()

    def write(self, text):
        for stream in (self.__fd1, self.__fd2):
            stream.write(text)

    def flush(self):
        for stream in (self.__fd1, self.__fd2):
            stream.flush()
def main(options, args):
    """Start the HTTP(S), sync, or FTP test server selected by the flags."""
    # Duplicate stderr (and optionally stdout) into testserver.log so output
    # survives even when the console is not captured.
    logfile = open('testserver.log', 'w')
    sys.stderr = FileMultiplexer(sys.stderr, logfile)
    if options.log_to_console:
        sys.stdout = FileMultiplexer(sys.stdout, logfile)
    else:
        sys.stdout = logfile

    port = options.port

    # Filled in below and serialized back to the parent process so it can
    # learn which (possibly ephemeral) ports were actually bound.
    server_data = {}

    if options.server_type == SERVER_HTTP:
        if options.cert:
            # let's make sure the cert file exists.
            if not os.path.isfile(options.cert):
                print 'specified server cert file not found: ' + options.cert + \
                      ' exiting...'
                return
            for ca_cert in options.ssl_client_ca:
                if not os.path.isfile(ca_cert):
                    print 'specified trusted client CA file not found: ' + ca_cert + \
                          ' exiting...'
                    return
            server = HTTPSServer(('127.0.0.1', port), TestPageHandler, options.cert,
                                 options.ssl_client_auth, options.ssl_client_ca,
                                 options.ssl_bulk_cipher)
            print 'HTTPS server started on port %d...' % server.server_port
        else:
            server = StoppableHTTPServer(('127.0.0.1', port), TestPageHandler)
            print 'HTTP server started on port %d...' % server.server_port

        server.data_dir = MakeDataDir()
        server.file_root_url = options.file_root_url
        server_data['port'] = server.server_port
        server._device_management_handler = None
        server.policy_keys = options.policy_keys
        server.policy_user = options.policy_user
    elif options.server_type == SERVER_SYNC:
        server = SyncHTTPServer(('127.0.0.1', port), SyncPageHandler)
        print 'Sync HTTP server started on port %d...' % server.server_port
        print 'Sync XMPP server started on port %d...' % server.xmpp_port
        server_data['port'] = server.server_port
        server_data['xmpp_port'] = server.xmpp_port
    # means FTP Server
    else:
        my_data_dir = MakeDataDir()

        # Instantiate a dummy authorizer for managing 'virtual' users
        authorizer = pyftpdlib.ftpserver.DummyAuthorizer()

        # Define a new user having full r/w permissions and a read-only
        # anonymous user
        authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')

        authorizer.add_anonymous(my_data_dir)

        # Instantiate FTP handler class
        ftp_handler = pyftpdlib.ftpserver.FTPHandler
        ftp_handler.authorizer = authorizer

        # Define a customized banner (string returned when client connects)
        ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
                              pyftpdlib.ftpserver.__ver__)

        # Instantiate FTP server class and listen to 127.0.0.1:port
        address = ('127.0.0.1', port)
        server = pyftpdlib.ftpserver.FTPServer(address, ftp_handler)
        server_data['port'] = server.socket.getsockname()[1]
        print 'FTP server started on port %d...' % server_data['port']

    # Notify the parent that we've started. (BaseServer subclasses
    # bind their sockets on construction.)
    if options.startup_pipe is not None:
        server_data_json = simplejson.dumps(server_data)
        server_data_len = len(server_data_json)
        print 'sending server_data: %s (%d bytes)' % (
            server_data_json, server_data_len)
        if sys.platform == 'win32':
            fd = msvcrt.open_osfhandle(options.startup_pipe, 0)
        else:
            fd = options.startup_pipe
        startup_pipe = os.fdopen(fd, "w")
        # First write the data length as an unsigned 4-byte value. This
        # is _not_ using network byte ordering since the other end of the
        # pipe is on the same machine.
        startup_pipe.write(struct.pack('=L', server_data_len))
        startup_pipe.write(server_data_json)
        startup_pipe.close()

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print 'shutting down server'
        server.stop = True
if __name__ == '__main__':
    # Command-line entry point: parse flags and hand off to main().
    # Note 'options' is intentionally left as a module-global because
    # MakeDataDir() reads it directly.
    option_parser = optparse.OptionParser()
    option_parser.add_option("-f", '--ftp', action='store_const',
                             const=SERVER_FTP, default=SERVER_HTTP,
                             dest='server_type',
                             help='start up an FTP server.')
    option_parser.add_option('', '--sync', action='store_const',
                             const=SERVER_SYNC, default=SERVER_HTTP,
                             dest='server_type',
                             help='start up a sync server.')
    option_parser.add_option('', '--log-to-console', action='store_const',
                             const=True, default=False,
                             dest='log_to_console',
                             help='Enables or disables sys.stdout logging to '
                             'the console.')
    option_parser.add_option('', '--port', default='0', type='int',
                             help='Port used by the server. If unspecified, the '
                             'server will listen on an ephemeral port.')
    option_parser.add_option('', '--data-dir', dest='data_dir',
                             help='Directory from which to read the files.')
    option_parser.add_option('', '--https', dest='cert',
                             help='Specify that https should be used, specify '
                             'the path to the cert containing the private key '
                             'the server should use.')
    option_parser.add_option('', '--ssl-client-auth', action='store_true',
                             help='Require SSL client auth on every connection.')
    option_parser.add_option('', '--ssl-client-ca', action='append', default=[],
                             help='Specify that the client certificate request '
                             'should include the CA named in the subject of '
                             'the DER-encoded certificate contained in the '
                             'specified file. This option may appear multiple '
                             'times, indicating multiple CA names should be '
                             'sent in the request.')
    option_parser.add_option('', '--ssl-bulk-cipher', action='append',
                             help='Specify the bulk encryption algorithm(s)'
                             'that will be accepted by the SSL server. Valid '
                             'values are "aes256", "aes128", "3des", "rc4". If '
                             'omitted, all algorithms will be used. This '
                             'option may appear multiple times, indicating '
                             'multiple algorithms should be enabled.');
    option_parser.add_option('', '--file-root-url', default='/files/',
                             help='Specify a root URL for files served.')
    option_parser.add_option('', '--startup-pipe', type='int',
                             dest='startup_pipe',
                             help='File handle of pipe to parent process')
    option_parser.add_option('', '--policy-key', action='append',
                             dest='policy_keys',
                             help='Specify a path to a PEM-encoded private key '
                             'to use for policy signing. May be specified '
                             'multiple times in order to load multipe keys into '
                             'the server. If ther server has multiple keys, it '
                             'will rotate through them in at each request a '
                             'round-robin fashion. The server will generate a '
                             'random key if none is specified on the command '
                             'line.')
    option_parser.add_option('', '--policy-user', default='user@example.com',
                             dest='policy_user',
                             help='Specify the user name the server should '
                             'report back to the client as the user owning the '
                             'token used for making the policy request.')
    options, args = option_parser.parse_args()

    sys.exit(main(options, args))
|
bsd-3-clause
|
riddlezyc/geolab
|
src/structure/metricC.py
|
1
|
12965
|
import pickle
import matplotlib.pyplot as plt
from datetime import datetime

# Distance cutoffs used to decide whether two molecules are "connected" for
# each clustering metric (units presumably angstroms -- confirm against the
# analysis that produced the pickled matrices).
criteriaA = 4.0
criteriaB = 4.0
criteriaC = 10.0

# Active data set; the commented-out alternatives are other simulation runs.
dirName = r'F:\simulations\asphaltenes\production\longtime\athInHeptane\nvt\analysis\fullmatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInHeptane-illite\nvt\analysis\fullMatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInToluene\nvt\analysis\fullMatrix/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\athInToluene-illite\nvt\analysis/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\a0InHeptane\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\a0InToluene\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\coalInHeptane\nvt\rerun/'
# dirName = r'F:\simulations\asphaltenes\production\longtime\coalInToluene\nvt\rerun/'

# Per-frame pairwise distance matrices (min-min, min-core, max-core files).
fileNameA = 'minMinMatrixFrames.pickle'
fileNameB = 'mincoreMatrixFrames.pickle'
fileNameC = 'maxcoreMatrixFrames.pickle'

print 'opening pickle file for metric A...'
time0 = datetime.now()
with open(dirName + fileNameA, 'rb') as foo:
    data = pickle.load(foo)
print 'timeing:', datetime.now() - time0
def ave_accum(list):  # parameter name kept for interface compat (shadows builtin)
    """Return the running (cumulative) mean of the input sequence.

    Element i of the result is the average of list[0..i]. Uses a running
    sum rather than the previous mean-of-mean recurrence, which both
    accumulates less rounding error and no longer crashes (IndexError)
    on an empty input -- an empty list is returned instead.
    """
    avelist = []
    total = 0.0
    for i, value in enumerate(list):
        total += value
        avelist.append(total / (i + 1))
    return avelist
# metric A
cluster, clusterNo, clusterAve, gmax = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if frame[i][j] <= criteriaA:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal to number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
cluster.append(avecount)
clusterNo.append(len(xconnect))
clusterAve.append(float(ng) / len(count))
gmax.append(max(count))
cumulave = ave_accum(cluster)
print 'timeing:', datetime.now() - time0
print 'opening pickle file for metric B...'
time1 = datetime.now()
with open(dirName + fileNameB, 'rb') as foo:
data = pickle.load(foo)
print 'timeing:', datetime.now() - time1
# metric B
clusterB, clusterBNo, clusterBAve, gmaxB = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if frame[i][j] <= criteriaB:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal to number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
clusterB.append(avecount)
clusterBNo.append(len(xconnect))
clusterBAve.append(float(ng) / len(count))
gmaxB.append(max(count))
print 'timeing:', datetime.now() - time1
print 'opening pickle file for metric C...'
time2 = datetime.now()
with open(dirName + fileNameC, 'rb') as foo:
data = pickle.load(foo)
print 'timeing:', datetime.now() - time2
clusterC, clusterCNo, clusterCAve, gmaxC = [], [], [], []
for iframe, frame in enumerate(data):
numberofMolecules = len(data[0])
connectSet = [[i] for i in range(numberofMolecules)]
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
xswap = max(frame[i][j], frame[j][i])
if xswap <= criteriaC:
connectSet[i].append(j), connectSet[j].append(i)
for i in range(numberofMolecules - 1):
for j in range(i + 1, numberofMolecules):
if len(set(connectSet[i]).intersection(set(connectSet[j]))) > 0:
x = set(connectSet[i]).union(set(connectSet[j]))
connectSet[i] = list(x)
connectSet[j] = list(x)
xconnect = []
for x in connectSet:
if x not in xconnect:
xconnect.append(x)
count = []
for x in xconnect:
count.append(connectSet.count(x))
ng = sum(count) # should always equal to number of molecules
ng2 = sum([x ** 2 for x in count])
avecount = float(ng2) / float(ng)
clusterC.append(avecount)
clusterCNo.append(len(xconnect))
clusterCAve.append(float(ng) / len(count))
gmaxC.append(max(count))
print 'timeing:', datetime.now() - time0
print 'writing data to file...'
with open(dirName + 'cluster-%s-%s.dat' % (criteriaC, criteriaA), 'w') as foo:
print >> foo, '#frame metricA ave_metricA metricB metricC No.A No.B No.C AveA AveB AveC gmaxA gmaxB gmaxC'
for iframe in range(len(data)):
print >> foo, '%5d%10.4f%10.4f%10.4f%10.4f%5d%5d%5d%10.4f%10.4f%10.4f%5d%5d%5d' % (
iframe, cluster[iframe], cumulave[iframe], clusterB[iframe], clusterC[iframe], clusterNo[iframe],
clusterBNo[iframe], clusterCNo[iframe], clusterAve[iframe], clusterBAve[iframe], clusterCAve[iframe],
gmax[iframe], gmaxB[iframe], gmaxC[iframe])
plt.figure(0, figsize=(8, 4))
figName = dirName + 'metric-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.plot(clusterB, label='metricB')
plt.plot(clusterC, label='metricC')
plt.plot(clusterAve, label='aveA')
plt.plot(clusterBAve, label='aveB')
plt.plot(clusterCAve, label='aveC')
plt.plot(gmax, label='gmaxA')
plt.plot(gmaxB, label='gmaxB')
plt.plot(gmaxC, label='gmaxC')
plt.legend(loc='best', ncol=3, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(0)
plt.figure(0, figsize=(8, 4))
figName = dirName + 'metric-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterC, label='metricC')
plt.semilogy(clusterAve, label='aveA')
plt.semilogy(clusterBAve, label='aveB')
plt.semilogy(clusterCAve, label='aveC')
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(gmaxC, label='gmaxC')
plt.legend(loc='best', ncol=3, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(0)
# plot C
plt.figure(1, figsize=(8, 4))
figName = dirName + 'metricC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmaxC, label='gmaxC')
plt.plot(clusterC, label='metricC')
plt.plot(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(1)
plt.figure(1, figsize=(8, 4))
figName = dirName + 'metricC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmaxC, label='gmaxC')
plt.semilogy(clusterC, label='metricC')
plt.semilogy(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(1)
plt.figure(2, figsize=(8, 4))
figName = dirName + 'metricB-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmaxB, label='gmaxB')
plt.plot(clusterB, label='metricB')
plt.plot(clusterBAve, label='aveB')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(2)
plt.figure(2, figsize=(8, 4))
figName = dirName + 'metricB-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterBAve, label='aveB')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(2)
plt.figure(3, figsize=(8, 4))
figName = dirName + 'metricA-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmax, label='gmaxA')
plt.plot(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.plot(clusterAve, label='aveA')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(3)
plt.figure(3, figsize=(8, 4))
figName = dirName + 'metricA-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(cluster, label='metricA')
# plt.plot(cumulave, label='ave_C', linewidth=3)
plt.semilogy(clusterAve, label='aveA')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(3)
plt.figure(4, figsize=(8, 4))
figName = dirName + 'metricABC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(gmax, label='gmaxA')
plt.plot(gmaxB, label='gmaxB')
plt.plot(gmaxC, label='gmaxC')
plt.plot(cluster, label='metricA')
plt.plot(clusterB, label='metricB')
plt.plot(clusterC, label='metricC')
plt.legend(loc='best', ncol=2, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(4)
plt.figure(4, figsize=(8, 4))
figName = dirName + 'metricABC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(gmax, label='gmaxA')
plt.semilogy(gmaxB, label='gmaxB')
plt.semilogy(gmaxC, label='gmaxC')
plt.semilogy(cluster, label='metricA')
plt.semilogy(clusterB, label='metricB')
plt.semilogy(clusterC, label='metricC')
plt.legend(loc='best', ncol=2, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(4)
plt.figure(5, figsize=(8, 4))
figName = dirName + 'aveABC-%s-%s.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.plot(clusterAve, label='aveA')
plt.plot(clusterBAve, label='aveB')
plt.plot(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(5)
plt.figure(5, figsize=(8, 4))
figName = dirName + 'aveABC-%s-%s-logy.png' % (criteriaC, criteriaA)
plt.title('Time evolution of g2', fontsize=12)
plt.grid(True)
plt.xlabel('Frames', fontsize=12)
plt.ylabel('g2 value', fontsize=12)
plt.semilogy(clusterAve, label='aveA')
plt.semilogy(clusterBAve, label='aveB')
plt.semilogy(clusterCAve, label='aveC')
plt.legend(loc='best', ncol=1, fontsize=12, shadow=False)
plt.savefig(figName, format='png', dpi=300)
# plt.show()
plt.close(5)
print 'timeing:', datetime.now() - time0
|
gpl-3.0
|
LChristakis/chalice-hunter
|
lib/python3.4/site-packages/scss/extension/compass/helpers.py
|
4
|
19696
|
"""Miscellaneous helper functions ported from Compass.
See: http://compass-style.org/reference/compass/helpers/
This collection is not necessarily complete or up-to-date.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import math
import os.path
import six
from . import CompassExtension
from scss import config
from scss.types import Boolean, Function, List, Null, Number, String, Url
from scss.util import to_str, getmtime, make_data_url
import re
log = logging.getLogger(__name__)
ns = CompassExtension.namespace
# Map of font file extensions to the format names used in CSS `format()`
# hints; also used (with woff/eot special-cased) to derive MIME types when
# inlining fonts as data URIs.
FONT_TYPES = {
    'woff': 'woff',
    'otf': 'opentype',
    'opentype': 'opentype',
    'ttf': 'truetype',
    'truetype': 'truetype',
    'svg': 'svg',
    'eot': 'embedded-opentype'
}
def add_cache_buster(url, mtime):
    """Append a ``_=<mtime>`` cache-busting parameter to *url*.

    The parameter is added to the query string (extending a non-empty one,
    otherwise starting a fresh one) and any fragment is re-attached after it.
    """
    fragment = url.split('#')
    query = fragment[0].split('?')
    if len(query) > 1 and query[1] != '':
        busted = '?'.join(query) + '&_=%s' % (mtime,)
    else:
        busted = query[0] + '?_=%s' % (mtime,)
    if len(fragment) > 1:
        busted += '#' + fragment[1]
    return busted
# ------------------------------------------------------------------------------
# Data manipulation
@ns.declare
def blank(*objs):
    """Returns true when the object is false, an empty string, or an empty list"""
    def _is_blank(obj):
        # Booleans are blank when false; strings when whitespace-only;
        # lists when every element is blank; anything else is non-blank.
        if isinstance(obj, Boolean):
            return not obj
        if isinstance(obj, String):
            return not obj.value.strip()
        if isinstance(obj, List):
            return all(_is_blank(el) for el in obj)
        return False
    return Boolean(all(_is_blank(obj) for obj in objs))
@ns.declare
def compact(*args):
    """Returns a new list after removing any non-true values"""
    # A single List argument keeps its separator; bare varargs join with commas.
    if len(args) == 1 and isinstance(args[0], List):
        source = args[0]
        use_comma = source.use_comma
    else:
        source = args
        use_comma = True
    return List([item for item in source if item], use_comma=use_comma)
@ns.declare
def reject(lst, *values):
    """Removes the given values from the list"""
    lst = List.from_maybe(lst)
    unwanted = frozenset(List.from_maybe_starargs(values))
    kept = [item for item in lst if item not in unwanted]
    return List(kept, use_comma=lst.use_comma)
@ns.declare
def first_value_of(*args):
    """Return the first word of a string, or the first element of a list; null when empty."""
    if len(args) == 1 and isinstance(args[0], String):
        # Preserve the concrete string subclass of the argument.
        words = args[0].value.split()
        return type(args[0])(words[0])
    values = List.from_maybe_starargs(args)
    if len(values):
        return values[0]
    return Null()
@ns.declare_alias('-compass-list')
def dash_compass_list(*args):
    # Coerce the Sass arguments into a single List value.
    return List.from_maybe_starargs(args)
@ns.declare_alias('-compass-space-list')
def dash_compass_space_list(*lst):
    """
    If the argument is a list, it will return a new list that is space delimited
    Otherwise it returns a new, single element, space-delimited list.
    """
    ret = dash_compass_list(*lst)
    # NOTE(review): List.value appears to be a Python list, and list.pop()
    # does not accept a string key with a default -- this looks like a
    # leftover from an older dict-based list representation; confirm upstream.
    ret.value.pop('_', None)
    return ret
@ns.declare_alias('-compass-slice')
def dash_compass_slice(lst, start_index, end_index=None):
    """Return the slice lst[start_index..end_index] with an *inclusive* end.

    An omitted end_index slices to the end of the list.
    """
    # int() casts: Number values may be floats, and float slice indices
    # raise TypeError on Python 3.
    start_index = int(Number(start_index).value)
    if end_index is not None:
        # `is not None` (the original tested truthiness, so an end index of 0
        # was silently treated as "slice to the end").
        # This function has an inclusive end, but Python slicing is exclusive.
        end_index = int(Number(end_index).value) + 1
    lst = List(lst)
    ret = lst.value[start_index:end_index]
    return List(ret, use_comma=lst.use_comma)
# ------------------------------------------------------------------------------
# Property prefixing
@ns.declare
def prefixed(prefix, *args):
    """Return whether any argument supports conversion to the given vendor prefix."""
    conversion_attr = 'to_' + to_str(prefix).replace('-', '_')
    return Boolean(any(
        hasattr(arg, conversion_attr)
        for arg in List.from_maybe_starargs(args)))
@ns.declare
def prefix(prefix, *args):
    """Convert each argument to the given vendor prefix via its `to_<prefix>`
    method, where supported; unsupported values pass through unchanged."""
    to_fnct_str = 'to_' + to_str(prefix).replace('-', '_')
    args = list(args)
    for i, arg in enumerate(args):
        if isinstance(arg, List):
            # Convert each element of a nested list individually.
            _value = []
            for iarg in arg:
                to_fnct = getattr(iarg, to_fnct_str, None)
                if to_fnct:
                    _value.append(to_fnct())
                else:
                    _value.append(iarg)
            args[i] = List(_value)
        else:
            to_fnct = getattr(arg, to_fnct_str, None)
            if to_fnct:
                args[i] = to_fnct()
    # maybe_new unwraps a single-element result back to a scalar.
    return List.maybe_new(args, use_comma=True)
# Vendor-prefix shorthands: each `-xxx(...)` Sass function delegates to
# prefix() with the corresponding `to__xxx` conversion suffix.
@ns.declare_alias('-moz')
def dash_moz(*args):
    return prefix('_moz', *args)
@ns.declare_alias('-svg')
def dash_svg(*args):
    return prefix('_svg', *args)
@ns.declare_alias('-css2')
def dash_css2(*args):
    return prefix('_css2', *args)
@ns.declare_alias('-pie')
def dash_pie(*args):
    return prefix('_pie', *args)
@ns.declare_alias('-webkit')
def dash_webkit(*args):
    return prefix('_webkit', *args)
@ns.declare_alias('-owg')
def dash_owg(*args):
    return prefix('_owg', *args)
@ns.declare_alias('-khtml')
def dash_khtml(*args):
    return prefix('_khtml', *args)
@ns.declare_alias('-ms')
def dash_ms(*args):
    return prefix('_ms', *args)
@ns.declare_alias('-o')
def dash_o(*args):
    return prefix('_o', *args)
# ------------------------------------------------------------------------------
# Selector generation
@ns.declare
def append_selector(selector, to_append):
    """Append *to_append* to every comma-separated selector in *selector*,
    returning the sorted, de-duplicated result."""
    if isinstance(selector, List):
        lst = selector.value
    else:
        lst = String.unquoted(selector).value.split(',')
    to_append = String.unquoted(to_append).value.strip()
    ret = sorted(set(s.strip() + to_append for s in lst if s.strip()))
    ret = dict(enumerate(ret))
    # NOTE(review): returns a plain dict {0: sel, ..., '_': ','} rather than a
    # List -- this looks like the legacy internal list representation; confirm
    # callers still expect it.
    ret['_'] = ','
    return ret
# Element lists backing elements-of-type(), keyed by default CSS display type.
_elements_of_type_block = 'address, article, aside, blockquote, center, dd, details, dir, div, dl, dt, fieldset, figcaption, figure, footer, form, frameset, h1, h2, h3, h4, h5, h6, header, hgroup, hr, isindex, menu, nav, noframes, noscript, ol, p, pre, section, summary, ul'
_elements_of_type_inline = 'a, abbr, acronym, audio, b, basefont, bdo, big, br, canvas, cite, code, command, datalist, dfn, em, embed, font, i, img, input, kbd, keygen, label, mark, meter, output, progress, q, rp, rt, ruby, s, samp, select, small, span, strike, strong, sub, sup, textarea, time, tt, u, var, video, wbr'
_elements_of_type_table = 'table'
_elements_of_type_list_item = 'li'
_elements_of_type_table_row_group = 'tbody'
_elements_of_type_table_header_group = 'thead'
_elements_of_type_table_footer_group = 'tfoot'
_elements_of_type_table_row = 'tr'
_elements_of_type_table_cel = 'td, th'
_elements_of_type_html5_block = 'article, aside, details, figcaption, figure, footer, header, hgroup, menu, nav, section, summary'
_elements_of_type_html5_inline = 'audio, canvas, command, datalist, embed, keygen, mark, meter, output, progress, rp, rt, ruby, time, video, wbr'
_elements_of_type_html5 = 'article, aside, audio, canvas, command, datalist, details, embed, figcaption, figure, footer, header, hgroup, keygen, mark, menu, meter, nav, output, progress, rp, rt, ruby, section, summary, time, video, wbr'
_elements_of_type = {
    'block': sorted(_elements_of_type_block.replace(' ', '').split(',')),
    'inline': sorted(_elements_of_type_inline.replace(' ', '').split(',')),
    'table': sorted(_elements_of_type_table.replace(' ', '').split(',')),
    'list-item': sorted(_elements_of_type_list_item.replace(' ', '').split(',')),
    'table-row-group': sorted(_elements_of_type_table_row_group.replace(' ', '').split(',')),
    'table-header-group': sorted(_elements_of_type_table_header_group.replace(' ', '').split(',')),
    'table-footer-group': sorted(_elements_of_type_table_footer_group.replace(' ', '').split(',')),
    # BUG FIX: these two entries previously reused
    # _elements_of_type_table_footer_group ('tfoot'), leaving the dedicated
    # 'tr' and 'td, th' constants above unused.
    'table-row': sorted(_elements_of_type_table_row.replace(' ', '').split(',')),
    'table-cell': sorted(_elements_of_type_table_cel.replace(' ', '').split(',')),
    'html5-block': sorted(_elements_of_type_html5_block.replace(' ', '').split(',')),
    'html5-inline': sorted(_elements_of_type_html5_inline.replace(' ', '').split(',')),
    'html5': sorted(_elements_of_type_html5.replace(' ', '').split(',')),
}
@ns.declare
def elements_of_type(display):
    """Return the element names whose default display type is *display*."""
    display_name = String.unquoted(display).value
    try:
        names = _elements_of_type[display_name]
    except KeyError:
        raise Exception("Elements of type '%s' not found!" % display_name)
    return List(map(String, names), use_comma=True)
@ns.declare
def enumerate_(prefix, frm, through, separator='-'):
    """Return `prefix-N` strings (or bare Numbers when prefix is empty)
    for N in frm..through inclusive.

    DEVIATION: reversed enumerations are allowed (and ranges, since range()
    uses enumerate, like '@for .. from .. through').
    """
    separator = String.unquoted(separator).value
    try:
        frm = int(getattr(frm, 'value', frm))
    except ValueError:
        frm = 1
    try:
        through = int(getattr(through, 'value', through))
    except ValueError:
        through = frm
    if frm > through:
        # Count downwards when the bounds are reversed.
        indexes = range(frm, through - 1, -1)
    else:
        indexes = range(frm, through + 1)
    ret = []
    for i in indexes:
        if prefix and prefix.value:
            ret.append(String.unquoted(prefix.value + separator + six.text_type(i)))
        else:
            ret.append(Number(i))
    return List(ret, use_comma=True)
@ns.declare_alias('headings')
@ns.declare
def headers(frm=None, to=None):
    """Return a list of hN selectors.

    headers() / headers('all') -> h1..h6; headers(n) -> h1..hn;
    headers(a, b) -> ha..hb.  Non-numeric arguments fall back to 1 and 6.
    """
    if frm and to is None:
        # Single argument: it is the *upper* bound (or the keyword 'all').
        if isinstance(frm, String) and frm.value.lower() == 'all':
            frm = 1
            to = 6
        else:
            try:
                to = int(getattr(frm, 'value', frm))
            except ValueError:
                to = 6
            frm = 1
    else:
        try:
            frm = 1 if frm is None else int(getattr(frm, 'value', frm))
        except ValueError:
            frm = 1
        try:
            to = 6 if to is None else int(getattr(to, 'value', to))
        except ValueError:
            to = 6
    ret = [String.unquoted('h' + six.text_type(i)) for i in range(frm, to + 1)]
    return List(ret, use_comma=True)
def _nest_selector_strings(arg):
    """Yield the non-empty, stripped selector strings contained in *arg*
    (a List of Strings/strings, or a comma-separated String)."""
    if isinstance(arg, List):
        items = arg
    elif isinstance(arg, String):
        items = arg.value.split(',')
    else:
        raise TypeError("Expected list or string, got %r" % (arg,))
    for s in items:
        if isinstance(s, String):
            s = s.value
        elif not isinstance(s, six.string_types):
            raise TypeError("Expected string, got %r" % (s,))
        s = s.strip()
        if s:
            yield s
@ns.declare
def nest(*arguments):
    """Combine each successive argument's selectors with the accumulated
    selectors: `&` is substituted where present, otherwise a descendant
    combinator (or direct concatenation after '.', ':' or '#') is used.

    The identical parsing of every argument is factored into
    _nest_selector_strings (the original duplicated it inline).
    """
    ret = list(_nest_selector_strings(arguments[0]))
    for arg in arguments[1:]:
        new_ret = []
        for s in _nest_selector_strings(arg):
            for r in ret:
                if '&' in s:
                    new_ret.append(s.replace('&', r))
                elif not r or r[-1] in ('.', ':', '#'):
                    # No space after a class/pseudo/id prefix.
                    new_ret.append(r + s)
                else:
                    new_ret.append(r + ' ' + s)
        ret = new_ret
    ret = [String.unquoted(s) for s in sorted(set(ret))]
    return List(ret, use_comma=True)
# This isn't actually from Compass, but it's just a shortcut for enumerate().
# DEVIATION: allow reversed ranges (range() uses enumerate() which allows reversed values, like '@for .. from .. through')
@ns.declare
def range_(frm, through=None):
    """Shortcut for enumerate() without a prefix: range(n) is 1..n."""
    if through is None:
        frm, through = 1, frm
    return enumerate_(None, frm, through)
# ------------------------------------------------------------------------------
# Working with CSS constants
# Keyword positions and their mirror images; 'center' is its own opposite.
OPPOSITE_POSITIONS = {
    'top': String.unquoted('bottom'),
    'bottom': String.unquoted('top'),
    'left': String.unquoted('right'),
    'right': String.unquoted('left'),
    'center': String.unquoted('center'),
}
DEFAULT_POSITION = [String.unquoted('center'), String.unquoted('top')]
def _position(opposite, positions):
    """Normalize a list of CSS position components, optionally mirroring each
    one (keywords via OPPOSITE_POSITIONS, percentages as 100-x, angles +180)."""
    if positions is None:
        positions = DEFAULT_POSITION
    else:
        positions = List.from_maybe(positions)
    ret = []
    for pos in positions:
        if isinstance(pos, (String, six.string_types)):
            pos_value = getattr(pos, 'value', pos)
            if pos_value in OPPOSITE_POSITIONS:
                if opposite:
                    ret.append(OPPOSITE_POSITIONS[pos_value])
                else:
                    ret.append(pos)
                continue
            elif pos_value == 'to':
                # Gradient syntax keyword; leave alone
                ret.append(pos)
                continue
        elif isinstance(pos, Number):
            if pos.is_simple_unit('%'):
                if opposite:
                    ret.append(Number(100 - pos.value, '%'))
                else:
                    ret.append(pos)
                continue
            elif pos.is_simple_unit('deg'):
                # TODO support other angle types?
                if opposite:
                    ret.append(Number((pos.value + 180) % 360, 'deg'))
                else:
                    ret.append(pos)
                continue
        # Unrecognized component: pass it through unchanged (warn when an
        # opposite was requested but cannot be computed).
        if opposite:
            # NOTE(review): log.warn is the deprecated alias of log.warning.
            log.warn("Can't find opposite for position %r" % (pos,))
        ret.append(pos)
    return List(ret, use_comma=False).maybe()
@ns.declare
def position(p):
    """Normalize a position without mirroring it."""
    return _position(False, p)
@ns.declare
def opposite_position(p):
    """Return the mirror image of a position."""
    return _position(True, p)
# ------------------------------------------------------------------------------
# Math
@ns.declare
def pi():
    """Return the constant pi as a unitless Number."""
    return Number(math.pi)
@ns.declare
def e():
    """Return Euler's number as a unitless Number."""
    return Number(math.e)
@ns.declare
def log_(number, base=None):
    """Return the logarithm of *number* -- natural by default, or in *base*.

    Both arguments must be unitless Numbers.
    """
    # Validate in argument order so the error raised matches the first
    # offending value.
    for arg in (number, base):
        if arg is None:
            continue
        if not isinstance(arg, Number):
            raise TypeError("Expected number, got %r" % (arg,))
        if not arg.is_unitless:
            raise ValueError("Expected unitless number, got %r" % (arg,))
    if base is None:
        return Number(math.log(number.value))
    return Number(math.log(number.value, base.value))
@ns.declare
def pow(number, exponent):
    # Sass-facing pow(); shadowing the Python builtin is intentional here.
    return number ** exponent
# Expose selected math functions directly as single-argument Number helpers.
ns.set_function('sqrt', 1, Number.wrap_python_function(math.sqrt))
ns.set_function('sin', 1, Number.wrap_python_function(math.sin))
ns.set_function('cos', 1, Number.wrap_python_function(math.cos))
ns.set_function('tan', 1, Number.wrap_python_function(math.tan))
# ------------------------------------------------------------------------------
# Fonts
def _fonts_root():
    """Return the configured fonts root, falling back to the static root."""
    if config.FONTS_ROOT is None:
        return config.STATIC_ROOT
    return config.FONTS_ROOT
def _font_url(path, only_path=False, cache_buster=True, inline=False):
    """Resolve *path* against the fonts root.

    Returns a String (when only_path) or Url; when *inline* the file content
    is embedded as a data URI, otherwise a cache-busting `_=<mtime>` query
    parameter is appended when the file's mtime is known.
    """
    filepath = String.unquoted(path).value
    file = None
    FONTS_ROOT = _fonts_root()
    if callable(FONTS_ROOT):
        # Django-style storage callable: yields (name, storage) pairs.
        try:
            _file, _storage = list(FONTS_ROOT(filepath))[0]
        except IndexError:
            filetime = None
        else:
            filetime = getmtime(_file, _storage)
        if filetime is None:
            filetime = 'NA'
        elif inline:
            file = _storage.open(_file)
    else:
        _path = os.path.join(FONTS_ROOT, filepath.strip('/'))
        filetime = getmtime(_path)
        if filetime is None:
            filetime = 'NA'
        elif inline:
            file = open(_path, 'rb')
    BASE_URL = config.FONTS_URL or config.STATIC_URL
    if file and inline:
        # Match the extension once (the original evaluated this regex twice,
        # and hit a NameError instead of the intended Exception when it did
        # not match at all).
        match = re.match(r'^([^?]+)[.](.*)([?].*)?$', path.value)
        font_type = String.unquoted(match.groups()[1]).value if match else None
        if font_type not in FONT_TYPES:
            raise Exception('Could not determine font type for "%s"' % path.value)
        # woff/eot have dedicated MIME types; everything else uses the
        # format name from FONT_TYPES (prefixed with font/ below if needed).
        if font_type == 'woff':
            mime = 'application/font-woff'
        elif font_type == 'eot':
            mime = 'application/vnd.ms-fontobject'
        else:
            mime = FONT_TYPES[font_type]
        url = make_data_url(
            (mime if '/' in mime else 'font/%s' % mime),
            file.read())
        file.close()
    else:
        url = '%s/%s' % (BASE_URL.rstrip('/'), filepath.lstrip('/'))
        if cache_buster and filetime != 'NA':
            url = add_cache_buster(url, filetime)
    if only_path:
        return String.unquoted(url)
    else:
        return Url.unquoted(url)
def _font_files(args, inline):
    """Pair each font path with a CSS format() hint, resolving URLs (or data
    URIs when *inline*).  A path may be followed by an explicit format name,
    otherwise the format is inferred from the file extension."""
    if args == ():
        return String.unquoted("")
    fonts = []
    args_len = len(args)
    skip_next = False
    for index, arg in enumerate(args):
        if not skip_next:
            # An explicit format may follow the path as the next argument;
            # when it does, consume it on the next iteration.
            font_type = args[index + 1] if args_len > (index + 1) else None
            if font_type and font_type.value in FONT_TYPES:
                skip_next = True
            else:
                # NOTE(review): if the regex below does not match, font_type
                # keeps the next-argument value (or None), so the membership
                # check can raise AttributeError -- confirm inputs always
                # carry a file extension.
                if re.match(r'^([^?]+)[.](.*)([?].*)?$', arg.value):
                    font_type = String.unquoted(re.match(r'^([^?]+)[.](.*)([?].*)?$', arg.value).groups()[1])
                if font_type.value in FONT_TYPES:
                    fonts.append(List([
                        _font_url(arg, inline=inline),
                        Function(FONT_TYPES[font_type.value], 'format'),
                    ], use_comma=False))
                else:
                    raise Exception('Could not determine font type for "%s"' % arg.value)
        else:
            skip_next = False
    return List(fonts, separator=',')
@ns.declare
def font_url(path, only_path=False, cache_buster=True):
    """
    Generates a path to an asset found relative to the project's font directory.
    Passing a true value as the second argument will cause the only the path to
    be returned instead of a `url()` function
    """
    return _font_url(path, only_path, cache_buster, False)
@ns.declare
def font_files(*args):
    """Return url()/format() pairs for the given font paths."""
    return _font_files(args, inline=False)
@ns.declare
def inline_font_files(*args):
    """Like font-files(), but embeds each font as a data URI."""
    return _font_files(args, inline=True)
# ------------------------------------------------------------------------------
# External stylesheets
@ns.declare
def stylesheet_url(path, only_path=False, cache_buster=True):
    """
    Generates a path to an asset found relative to the project's css directory.
    Passing a true value as the second argument will cause the only the path to
    be returned instead of a `url()` function
    """
    filepath = String.unquoted(path).value
    if callable(config.STATIC_ROOT):
        # Django-style storage callable: yields (name, storage) pairs.
        try:
            _file, _storage = list(config.STATIC_ROOT(filepath))[0]
        except IndexError:
            filetime = None
        else:
            filetime = getmtime(_file, _storage)
        if filetime is None:
            filetime = 'NA'
    else:
        _path = os.path.join(config.STATIC_ROOT, filepath.strip('/'))
        filetime = getmtime(_path)
        if filetime is None:
            filetime = 'NA'
    BASE_URL = config.STATIC_URL
    url = '%s%s' % (BASE_URL, filepath)
    # Consistent with _font_url: skip the cache buster when the mtime is
    # unknown -- appending a constant `?_=NA` defeats the cache without
    # ever busting it.
    if cache_buster and filetime != 'NA':
        url = add_cache_buster(url, filetime)
    if only_path:
        return String.unquoted(url)
    else:
        return Url.unquoted(url)
|
mit
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/weather/metoffice.py
|
2
|
3688
|
"""Support for UK Met Office weather service."""
import logging
import voluptuous as vol
from homeassistant.components.sensor.metoffice import (
CONDITION_CLASSES, ATTRIBUTION, MetOfficeCurrentData)
from homeassistant.components.weather import PLATFORM_SCHEMA, WeatherEntity
from homeassistant.const import (
CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS)
from homeassistant.helpers import config_validation as cv
REQUIREMENTS = ['datapoint==0.4.3']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Met Office"
# Latitude/longitude must be given together (or both omitted, in which case
# the Home Assistant configured coordinates are used -- see setup_platform).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Inclusive(CONF_LATITUDE, 'coordinates',
                  'Latitude and longitude must exist together'): cv.latitude,
    vol.Inclusive(CONF_LONGITUDE, 'coordinates',
                  'Latitude and longitude must exist together'): cv.longitude,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Met Office weather platform."""
    import datapoint as dp
    name = config.get(CONF_NAME)
    datapoint = dp.connection(api_key=config.get(CONF_API_KEY))
    # Fall back to the coordinates configured for this Home Assistant instance.
    latitude = config.get(CONF_LATITUDE, hass.config.latitude)
    longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
    if None in (latitude, longitude):
        _LOGGER.error("Latitude or longitude not set in Home Assistant config")
        return
    try:
        site = datapoint.get_nearest_site(
            latitude=latitude, longitude=longitude)
    except dp.exceptions.APIException as err:
        _LOGGER.error("Received error from Met Office Datapoint: %s", err)
        return
    if not site:
        _LOGGER.error("Unable to get nearest Met Office forecast site")
        return
    # Fetch once up-front so a broken API key or site aborts setup instead of
    # registering a dead entity.
    data = MetOfficeCurrentData(hass, datapoint, site)
    try:
        data.update()
    except (ValueError, dp.exceptions.APIException) as err:
        _LOGGER.error("Received error from Met Office Datapoint: %s", err)
        return
    add_entities([MetOfficeWeather(site, data, name)], True)
class MetOfficeWeather(WeatherEntity):
    """Implementation of a Met Office weather condition."""
    def __init__(self, site, data, name):
        """Initialise the platform with a data instance and site."""
        self._name = name
        self.data = data
        self.site = site
    def update(self):
        """Update current conditions."""
        self.data.update()
    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self._name, self.site.name)
    @property
    def condition(self):
        """Return the current condition, or None when it cannot be mapped."""
        # next(..., None) instead of [...][0]: a weather code missing from
        # CONDITION_CLASSES previously raised IndexError on state update.
        return next(
            (k for k, v in CONDITION_CLASSES.items()
             if self.data.data.weather.value in v),
            None)
    @property
    def temperature(self):
        """Return the platform temperature."""
        return self.data.data.temperature.value
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def pressure(self):
        """Return the mean sea-level pressure."""
        # Not provided by this Datapoint feed.
        return None
    @property
    def humidity(self):
        """Return the relative humidity."""
        return self.data.data.humidity.value
    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self.data.data.wind_speed.value
    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self.data.data.wind_direction.value
    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION
|
apache-2.0
|
bbc/kamaelia
|
Sketches/RJL/Packages/Kamaelia/Community/RJL/Kamaelia/Util/LineSplit.py
|
9
|
1318
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
from PureTransformer import PureTransformer
class LineSplit(PureTransformer):
    """Split each message into its separate lines and send them on as separate messages"""
    def processMessage(self, msg):
        # Forward every newline-delimited line as its own outbox message.
        for line in msg.split("\n"):
            self.send(line, "outbox")
__kamaelia_components__ = ( LineSplit, )
|
apache-2.0
|
ecohealthalliance/EpiTator
|
epitator/annospan.py
|
1
|
6496
|
#!/usr/bin/env python
# coding=utf8
from __future__ import absolute_import
EMPTY_LIST = []
class AnnoSpan(object):
    """
    A span of text with an annotation applied to it.

    ``start`` and ``end`` are character offsets into ``doc.text``; the span
    covers the half-open interval [start, end).
    """
    # __slots__ keeps per-instance memory small; documents carry many spans.
    __slots__ = ["start", "end", "doc", "metadata", "label", "base_spans"]
    def __init__(self, start, end, doc, label=None, metadata=None):
        self.start = start
        self.end = end
        self.doc = doc
        self.metadata = metadata
        # Base spans is only non-empty on span groups.
        # (EMPTY_LIST is a shared sentinel and must never be mutated.)
        self.base_spans = EMPTY_LIST
        self.label = label
    def __repr__(self):
        return u'AnnoSpan({0}-{1}, {2})'.format(self.start, self.end, self.label or self.text)
    def __lt__(self, other):
        # Order by start offset, breaking ties with the end offset.
        if self.start < other.start:
            return True
        elif self.start == other.start:
            return self.end < other.end
        else:
            return False
    def __len__(self):
        # Length of the span in characters.
        return self.end - self.start
    def distance(self, other_span):
        """
        The number of characters between this span and the other one.
        If the spans overlap the distance is the negative length of their
        overlap.
        >>> from .annotier import AnnoTier
        >>> from .annodoc import AnnoDoc
        >>> doc = AnnoDoc('one two three')
        >>> tier = AnnoTier([AnnoSpan(0, 3, doc), AnnoSpan(8, 13, doc)])
        >>> tier.spans[0].distance(tier.spans[1])
        5
        """
        if self.start < other_span.start:
            return other_span.start - self.end
        else:
            return self.start - other_span.end
    def overlaps(self, other_span):
        """
        Return true if the span overlaps other_span.
        """
        return (
            (self.start >= other_span.start and self.start < other_span.end) or
            (other_span.start >= self.start and other_span.start < self.end)
        )
    def contains(self, other_span):
        """
        Return true if the span completely contains other_span.
        """
        return self.start <= other_span.start and self.end >= other_span.end
    def adjacent_to(self, other_span, max_dist=1):
        """
        Return true if the span comes before or after other_span with at most
        max_dist charaters between them.
        """
        return (
            self.comes_before(other_span, max_dist) or
            other_span.comes_before(self, max_dist)
        )
    def comes_before(self, other_span, max_dist=1, allow_overlap=False):
        """
        Return True if the span comes before the other_span and there are
        max_dist or fewer charaters between them.
        >>> from .annotier import AnnoTier
        >>> from .annodoc import AnnoDoc
        >>> doc = AnnoDoc('one two three')
        >>> tier = AnnoTier([AnnoSpan(0, 3, doc), AnnoSpan(4, 7, doc)])
        >>> tier.spans[0].comes_before(tier.spans[1])
        True
        >>> tier.spans[1].comes_before(tier.spans[0])
        False
        """
        if allow_overlap:
            ok_start = self.start <= other_span.start
        else:
            ok_start = self.end <= other_span.start
        return ok_start and self.end >= other_span.start - max_dist
    def extended_through(self, other_span):
        """
        Create a new span that includes this one and the other span.
        """
        return SpanGroup([self, other_span], self.label)
    def trimmed(self):
        """
        Create a new AnnoSpan based on this one with the offsets adjusted
        so that there is no white space at the beginning or end.
        >>> from .annodoc import AnnoDoc
        >>> doc = AnnoDoc('one two three')
        >>> original_span = AnnoSpan(3, 8, doc)
        >>> original_span.trimmed()
        AnnoSpan(4-7, two)
        """
        # NOTE(review): only literal ' ' characters are trimmed, not tabs or
        # newlines -- confirm that is intended.
        start = self.start
        end = self.end
        doc_text = self.doc.text
        while start < end and doc_text[start] == " ":
            start += 1
        while start < end and doc_text[end - 1] == " ":
            end -= 1
        return AnnoSpan(start, end, self.doc, label=self.label, metadata=self.metadata)
    @property
    def text(self):
        # The slice of document text this span covers.
        return self.doc.text[self.start:self.end]
    def to_dict(self):
        """
        Return a json serializable dictionary.
        """
        return dict(
            label=self.label,
            textOffsets=[[self.start, self.end]]
        )
    def groupdict(self):
        """
        Return a dict with all the labeled matches.
        >>> from .annodoc import AnnoDoc
        >>> doc = AnnoDoc('one two wolf')
        >>> number_span_g = SpanGroup([AnnoSpan(0, 3, doc, 'number'),
        ...                            AnnoSpan(4, 7, doc, 'number'),
        ...                            AnnoSpan(8, 12, doc, 'animal')])
        >>> number_span_g.groupdict()['number']
        [AnnoSpan(0-3, number), AnnoSpan(4-7, number)]
        >>> number_span_g.groupdict()['animal']
        [AnnoSpan(8-12, animal)]
        """
        out = {}
        # Merge the groupdicts of all child spans, then sort each label's
        # span list into document order.
        for base_span in self.base_spans:
            for key, values in base_span.groupdict().items():
                out[key] = out.get(key, []) + values
        for values in out.values():
            values.sort()
        if self.label:
            out[self.label] = [self]
        return out
    def iterate_base_spans(self):
        """
        Recursively iterate over all base_spans including base_spans of child SpanGroups.
        """
        for span in self.base_spans:
            yield span
            for span2 in span.iterate_base_spans():
                yield span2
    def iterate_leaf_base_spans(self):
        """
        Return the leaf base spans in a SpanGroup tree.
        """
        for span in self.iterate_base_spans():
            if not isinstance(span, SpanGroup):
                yield span
class SpanGroup(AnnoSpan):
    """
    A AnnoSpan that extends through a group of AnnoSpans.
    """
    def __init__(self, base_spans, label=None, metadata=None):
        assert isinstance(base_spans, list)
        assert len(base_spans) > 0
        # The group covers the extremes of its members and inherits the
        # first member's document.
        group_start = min(span.start for span in base_spans)
        group_end = max(span.end for span in base_spans)
        super(SpanGroup, self).__init__(
            group_start, group_end, base_spans[0].doc, label, metadata)
        self.base_spans = base_spans
    def __repr__(self):
        joined_spans = ", ".join(str(span) for span in self.base_spans)
        return "SpanGroup(text=%s, label=%s, %s)" % (
            self.text, self.label, joined_spans)
|
apache-2.0
|
christabor/MoAL
|
MOAL/software_engineering/problem_solving/design_patterns/grasp/pattern_polymorphism.py
|
1
|
1104
|
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
DEBUG = True if __name__ == '__main__' else False
class BlobOfMatter(object):
    """Abstract base: concrete matter must describe itself via __str__."""
    def __str__(self):
        raise NotImplementedError
class UndifferentiatedCell(BlobOfMatter):
    """Each child class implements a __str__ method (beyond the native python)
    version, so each name is the same, but there are many different versions."""
    def __str__(self):
        return '[ Undifferentiated Cell ]'
class SkinCell(UndifferentiatedCell):
    def __str__(self):
        return 'Skin cell'
class BrainCell(UndifferentiatedCell):
    def __str__(self):
        return 'Brain cell'
class NerveCell(UndifferentiatedCell):
    def __str__(self):
        return 'Nerve cell'
if DEBUG:
    # Demo: the same print(cell) call dispatches to a different __str__
    # implementation for each subclass (polymorphism).
    with Section('GRASP polymorphism pattern'):
        cells = [UndifferentiatedCell(), SkinCell(), BrainCell(), NerveCell()]
        for cell in cells:
            print(cell)
|
apache-2.0
|
syrrim/werkzeug
|
examples/plnt/views.py
|
52
|
1145
|
# -*- coding: utf-8 -*-
"""
plnt.views
~~~~~~~~~~
Display the aggregated feeds.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
from datetime import datetime, date
from plnt.database import Blog, Entry
from plnt.utils import Pagination, expose, render_template
#: number of items per page
PER_PAGE = 30
@expose('/', defaults={'page': 1})
@expose('/page/<int:page>')
def index(request, page):
    """Show the index page or any an offset of it."""
    days = []
    seen_days = set()
    query = Entry.query.order_by(Entry.pub_date.desc())
    pagination = Pagination(query, PER_PAGE, page, 'index')
    for entry in pagination.entries:
        entry_day = date(*entry.pub_date.timetuple()[:3])
        if entry_day not in seen_days:
            seen_days.add(entry_day)
            days.append({'date': entry_day, 'entries': []})
        # Entries arrive sorted by pub_date desc, so same-day entries are
        # contiguous and always belong to the most recent day bucket.
        days[-1]['entries'].append(entry)
    return render_template('index.html', days=days, pagination=pagination)
@expose('/about')
def about(request):
    """Show the about page, so that we have another view func ;-)"""
    return render_template('about.html')
|
bsd-3-clause
|
saulpw/visidata
|
visidata/loaders/html.py
|
1
|
4860
|
import html
from visidata import *
option('html_title', '<h2>{sheet.name}</h2>', 'table header when saving to html')
def open_html(p):
    # Open an HTML file as an index sheet of its <table> elements.
    return HtmlTablesSheet(p.name, source=p)
open_htm = open_html  # .htm files open identically
class HtmlTablesSheet(IndexSheet):
    'Index of all <table> elements found in an HTML document.'
    rowtype = 'sheets'  # rowdef: HtmlTableSheet (sheet.html = lxml.html.HtmlElement)
    columns = IndexSheet.columns + [
        Column('tag', width=0, getter=lambda col,row: row.html.tag),
        Column('id', getter=lambda col,row: row.html.attrib.get('id')),
        Column('classes', getter=lambda col,row: row.html.attrib.get('class')),
    ]
    def iterload(self):
        import lxml.html
        from lxml import etree
        utf8_parser = etree.HTMLParser(encoding='utf-8')
        # Named `doc` (not `html`) so the stdlib `html` module used elsewhere
        # in this file is not shadowed.
        with self.source.open_text() as fp:
            doc = lxml.html.etree.parse(fp, parser=utf8_parser)
        self.setKeys([self.column('name')])
        self.column('keys').hide()
        self.column('source').hide()
        # .iter('table') already yields only <table> elements, so the
        # original's `if e.tag == 'table'` re-check was redundant.
        for i, e in enumerate(doc.iter('table')):
            yield HtmlTableSheet(e.attrib.get("id", "table_" + str(i)), source=e, html=e)
def is_header(elem):
    """Return True for <th> cells whose scope is absent, empty, or 'col'."""
    scope = elem.attrib.get('scope', '')
    return elem.tag == 'th' and (not scope or scope == 'col')
class HtmlTableSheet(Sheet):
    """A single HTML <table>, with header detection and rowspan/colspan handling."""
    rowtype = 'rows'  # list of strings
    columns = []
    def iterload(self):
        headers = []  # headers[headerrow][colnum] -> header text ('' fill, None = gap)
        maxlinks = {}  # [colnum] -> nlinks:int
        for rownum, r in enumerate(self.source.iter('tr')):
            row = []
            colnum = 0
            # get starting column, which might be different if there were rowspan>1 already
            if rownum < len(headers):
                while colnum < len(headers[rownum]):
                    if headers[rownum][colnum] is None:
                        break
                    colnum += 1
            for cell in r.getchildren():
                colspan = int(cell.attrib.get('colspan', 1))
                rowspan = int(cell.attrib.get('rowspan', 1))
                cellval = ' '.join(x.strip() for x in cell.itertext())  # text only without markup
                links = [x.get('href') for x in cell.iter('a')]
                maxlinks[colnum] = max(maxlinks.get(colnum, 0), len(links))
                if is_header(cell):
                    # Record the header text into every (row, col) slot the
                    # cell spans.
                    for k in range(rownum, rownum+rowspan):
                        while k >= len(headers):  # extend headers list with lists for all header rows
                            headers.append([])
                        for j in range(colnum, colnum+colspan):
                            while j >= len(headers[k]):
                                headers[k].append(None)
                            headers[k][j] = cellval
                        cellval = ''   # use empty non-None value for subsequent rows in the rowspan
                else:
                    while colnum >= len(row):
                        row.append(None)
                    # rowdef cell: (text, [hrefs]).
                    row[colnum] = (cellval, links)
                colnum += colspan
            if any(row):
                yield row
        self.columns = []
        if headers:
            # One column name per position, joining stacked header rows.
            it = itertools.zip_longest(*headers, fillvalue='')
        else:
            # No header rows: promote the first data row to column names.
            # NOTE(review): `itertools` here relies on visidata's star import
            # at the top of the file -- confirm.
            it = [list(x) for x in self.rows[0]]
            self.rows = self.rows[1:]
        for colnum, names in enumerate(it):
            name = '_'.join(str(x) for x in names if x)
            self.addColumn(Column(name, getter=lambda c,r,i=colnum: r[i][0]))
            # One extra column per link slot observed in this column.
            for linknum in range(maxlinks.get(colnum, 0)):
                self.addColumn(Column(name+'_link'+str(linknum), width=20, getter=lambda c,r,i=colnum,j=linknum: r[i][1][j]))
@VisiData.api
def save_html(vd, p, *vsheets):
    'Save vsheets as HTML tables in a single file'
    # ascii + xmlcharrefreplace: any non-ASCII cell value survives as an HTML
    # character reference regardless of how the reader decodes the file.
    with open(p, 'w', encoding='ascii', errors='xmlcharrefreplace') as fp:
        for sheet in vsheets:
            if options.html_title:
                fp.write(options.html_title.format(sheet=sheet, vd=vd))
            fp.write('<table id="{sheetname}">\n'.format(sheetname=html.escape(sheet.name)))
            # headers
            fp.write('<tr>')
            for col in sheet.visibleCols:
                contents = html.escape(col.name)
                fp.write('<th>{colname}</th>'.format(colname=contents))
            fp.write('</tr>\n')
            # rows
            with Progress(gerund='saving'):
                for dispvals in sheet.iterdispvals(format=True):
                    fp.write('<tr>')
                    for val in dispvals.values():
                        fp.write('<td>')
                        fp.write(html.escape(val))
                        fp.write('</td>')
                    fp.write('</tr>\n')
            fp.write('</table>')
    vd.status('%s save finished' % p)
VisiData.save_htm = VisiData.save_html  # same saver for the .htm extension
|
gpl-3.0
|
krisdages/ultisnips
|
test/test_MultipleMatches.py
|
29
|
2438
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Selecting Between Same Triggers {{{#
class _MultipleMatches(_VimTest):
    # Two snippets share the trigger 'test', so expanding it brings up a
    # numbered selection prompt. Tuples appear to be
    # (trigger, expansion, description) -- 'wanted' below matches field [1].
    snippets = (('test', 'Case1', 'This is Case 1'),
                ('test', 'Case2', 'This is Case 2'))
class Multiple_SimpleCaseSelectFirst_ECR(_MultipleMatches):
    # Answering '1<CR>' at the prompt picks the first candidate.
    keys = 'test' + EX + '1\n'
    wanted = 'Case1'
class Multiple_SimpleCaseSelectSecond_ECR(_MultipleMatches):
    # Answering '2<CR>' picks the second candidate.
    keys = 'test' + EX + '2\n'
    wanted = 'Case2'
class Multiple_SimpleCaseSelectTooHigh_ESelectLast(_MultipleMatches):
    # An out-of-range answer ('5') falls back to the last candidate.
    keys = 'test' + EX + '5\n'
    wanted = 'Case2'
class Multiple_SimpleCaseSelectZero_EEscape(_MultipleMatches):
    # '0' aborts the prompt; the literal trigger stays and further typing
    # continues after it.
    keys = 'test' + EX + '0\n' + 'hi'
    wanted = 'testhi'
class Multiple_SimpleCaseEscapeOut_ECR(_MultipleMatches):
    # ESC aborts the prompt the same way '0' does.
    keys = 'test' + EX + ESC + 'hi'
    wanted = 'testhi'
class Multiple_ManySnippetsOneTrigger_ECR(_VimTest):
    # 29 snippets share one trigger; double-ESC must abort the (long)
    # selection prompt cleanly, leaving the typed trigger untouched.
    # Snippet definition {{{#
    snippets = (
        ('test', 'Case1', 'This is Case 1'),
        ('test', 'Case2', 'This is Case 2'),
        ('test', 'Case3', 'This is Case 3'),
        ('test', 'Case4', 'This is Case 4'),
        ('test', 'Case5', 'This is Case 5'),
        ('test', 'Case6', 'This is Case 6'),
        ('test', 'Case7', 'This is Case 7'),
        ('test', 'Case8', 'This is Case 8'),
        ('test', 'Case9', 'This is Case 9'),
        ('test', 'Case10', 'This is Case 10'),
        ('test', 'Case11', 'This is Case 11'),
        ('test', 'Case12', 'This is Case 12'),
        ('test', 'Case13', 'This is Case 13'),
        ('test', 'Case14', 'This is Case 14'),
        ('test', 'Case15', 'This is Case 15'),
        ('test', 'Case16', 'This is Case 16'),
        ('test', 'Case17', 'This is Case 17'),
        ('test', 'Case18', 'This is Case 18'),
        ('test', 'Case19', 'This is Case 19'),
        ('test', 'Case20', 'This is Case 20'),
        ('test', 'Case21', 'This is Case 21'),
        ('test', 'Case22', 'This is Case 22'),
        ('test', 'Case23', 'This is Case 23'),
        ('test', 'Case24', 'This is Case 24'),
        ('test', 'Case25', 'This is Case 25'),
        ('test', 'Case26', 'This is Case 26'),
        ('test', 'Case27', 'This is Case 27'),
        ('test', 'Case28', 'This is Case 28'),
        ('test', 'Case29', 'This is Case 29'),
    )  # }}}
    keys = 'test' + EX + ' ' + ESC + ESC + 'ahi'
    wanted = 'testhi'
# End: Selecting Between Same Triggers #}}}
|
gpl-3.0
|
mlyundin/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    """IncrementalPCA on dense arrays matches PCA's explained variance and
    yields an invertible covariance/precision pair."""
    data = iris.data
    chunk = data.shape[0] // 3

    incremental = IncrementalPCA(n_components=2, batch_size=chunk)
    reference = PCA(n_components=2)
    reference.fit_transform(data)

    projected = incremental.fit_transform(data)
    np.testing.assert_equal(projected.shape, (data.shape[0], 2))
    assert_almost_equal(incremental.explained_variance_ratio_.sum(),
                        reference.explained_variance_ratio_.sum(), 1)

    # covariance * precision should be (approximately) the identity.
    for n_components in [1, 2, data.shape[1]]:
        incremental = IncrementalPCA(n_components, batch_size=chunk)
        incremental.fit(data)
        cov = incremental.get_covariance()
        precision = incremental.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(data.shape[1]))
def test_incremental_pca_check_projection():
    # Test that the projection of data is correct.
    # Fixed seed -> deterministic data; draw order matters below.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)

    # Normalize
    Yt /= np.sqrt((Yt ** 2).sum())

    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    """transform followed by inverse_transform recovers the data (the data
    is almost of rank n_components, so little is lost)."""
    rng = np.random.RandomState(1999)
    n_samples, n_features = 50, 3
    X = rng.randn(n_samples, n_features)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    estimator = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    reconstructed = estimator.inverse_transform(estimator.transform(X))
    assert_almost_equal(X, reconstructed, decimal=3)
def test_incremental_pca_validation():
    """n_components must satisfy 1 <= n_components <= n_features."""
    X = [[0, 1], [1, 0]]
    for bad_n_components in [-1, 0, .99, 3]:
        assert_raises(ValueError,
                      IncrementalPCA(bad_n_components, batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Changing n_components via set_params() between partial fits must be
    # rejected unless it returns to the originally fitted width.
    # (The original comment here was copy-pasted from the batch-signs test.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    """Changing the number of features between partial fits must raise."""
    rng = np.random.RandomState(1999)
    first_batch = rng.randn(100, 20)
    wider_batch = rng.randn(100, 50)
    estimator = IncrementalPCA(n_components=None)
    estimator.fit(first_batch)
    assert_raises(ValueError, estimator.partial_fit, wider_batch)
def test_incremental_pca_batch_signs():
    """The sign pattern of components_ is stable across batch sizes."""
    rng = np.random.RandomState(1999)
    X = rng.randn(100, 3)

    fitted = [
        IncrementalPCA(n_components=None, batch_size=bs).fit(X).components_
        for bs in np.arange(10, 20)
    ]
    for previous, current in zip(fitted[:-1], fitted[1:]):
        assert_almost_equal(np.sign(previous), np.sign(current), decimal=6)
def test_incremental_pca_batch_values():
    # Test that components_ values are stable over batch sizes.
    # (Looser tolerance than the sign test above: decimal=1.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)

    # Compare each fit against the next batch size's fit.
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean

    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    # Feed the same data in consecutive slices via partial_fit.
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    """IncrementalPCA agrees with PCA on iris, up to a per-component sign flip."""
    data = iris.data
    full_projection = PCA(n_components=2).fit_transform(data)
    incremental_projection = IncrementalPCA(
        n_components=2, batch_size=25).fit_transform(data)
    assert_almost_equal(np.abs(full_projection),
                        np.abs(incremental_projection), 1)
def test_incremental_pca_against_pca_random_data():
    """IncrementalPCA agrees with PCA on random data, up to a sign flip."""
    rng = np.random.RandomState(1999)
    # Note: draw order matters for reproducibility (randn first, then rand).
    X = rng.randn(100, 3) + 5 * rng.rand(1, 3)

    full_projection = PCA(n_components=3).fit_transform(X)
    incremental_projection = IncrementalPCA(
        n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(full_projection),
                        np.abs(incremental_projection), 1)
def test_explained_variances():
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    # nc=None keeps all components; 99 drops one so noise_variance_ is nonzero.
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)

        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        # Whitened projections match up to sign.
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        # Both inverse transforms must undo the whitening.
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
|
bsd-3-clause
|
zhukaixy/kbengine
|
kbe/src/lib/python/Lib/test/test_mimetypes.py
|
111
|
4279
|
import io
import locale
import mimetypes
import sys
import unittest
from test import support
# Tell it we don't know about external files:
# (reset module state so the tests exercise the built-in default table,
# not whatever /etc/mime.types etc. contain on this machine)
mimetypes.knownfiles = []
mimetypes.inited = False
mimetypes._default_mime_types()
class MimeTypesTestCase(unittest.TestCase):
def setUp(self):
self.db = mimetypes.MimeTypes()
def test_default_data(self):
eq = self.assertEqual
eq(self.db.guess_type("foo.html"), ("text/html", None))
eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))
eq(self.db.guess_type("foo.tar.bz2"), ("application/x-tar", "bzip2"))
eq(self.db.guess_type("foo.tar.xz"), ("application/x-tar", "xz"))
def test_data_urls(self):
eq = self.assertEqual
guess_type = self.db.guess_type
eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None))
eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None))
def test_file_parsing(self):
eq = self.assertEqual
sio = io.StringIO("x-application/x-unittest pyunit\n")
self.db.readfp(sio)
eq(self.db.guess_type("foo.pyunit"),
("x-application/x-unittest", None))
eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit")
def test_non_standard_types(self):
eq = self.assertEqual
# First try strict
eq(self.db.guess_type('foo.xul', strict=True), (None, None))
eq(self.db.guess_extension('image/jpg', strict=True), None)
# And then non-strict
eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None))
eq(self.db.guess_extension('image/jpg', strict=False), '.jpg')
def test_guess_all_types(self):
eq = self.assertEqual
unless = self.assertTrue
# First try strict. Use a set here for testing the results because if
# test_urllib2 is run before test_mimetypes, global state is modified
# such that the 'all' set will have more items in it.
all = set(self.db.guess_all_extensions('text/plain', strict=True))
unless(all >= set(['.bat', '.c', '.h', '.ksh', '.pl', '.txt']))
# And now non-strict
all = self.db.guess_all_extensions('image/jpg', strict=False)
all.sort()
eq(all, ['.jpg'])
# And now for no hits
all = self.db.guess_all_extensions('image/jpg', strict=True)
eq(all, [])
def test_encoding(self):
getpreferredencoding = locale.getpreferredencoding
self.addCleanup(setattr, locale, 'getpreferredencoding',
getpreferredencoding)
locale.getpreferredencoding = lambda: 'ascii'
filename = support.findfile("mime.types")
mimes = mimetypes.MimeTypes([filename])
exts = mimes.guess_all_extensions('application/vnd.geocube+xml',
strict=True)
self.assertEqual(exts, ['.g3', '.g\xb3'])
@unittest.skipUnless(sys.platform.startswith("win"), "Windows only")
class Win32MimeTypesTestCase(unittest.TestCase):
    """Check that mimetypes picks up MIME data from the Windows registry."""

    def setUp(self):
        # ensure all entries actually come from the Windows registry
        self.original_types_map = mimetypes.types_map.copy()
        mimetypes.types_map.clear()
        mimetypes.init()
        self.db = mimetypes.MimeTypes()

    def tearDown(self):
        # restore default settings
        mimetypes.types_map.clear()
        mimetypes.types_map.update(self.original_types_map)

    def test_registry_parsing(self):
        # the original, minimum contents of the MIME database in the
        # Windows registry is undocumented AFAIK.
        # Use file types that should *always* exist:
        eq = self.assertEqual
        eq(self.db.guess_type("foo.txt"), ("text/plain", None))
        eq(self.db.guess_type("image.jpg"), ("image/jpeg", None))
        eq(self.db.guess_type("image.png"), ("image/png", None))
def test_main():
    # Win32MimeTypesTestCase skips itself on non-Windows via its decorator.
    support.run_unittest(MimeTypesTestCase,
                         Win32MimeTypesTestCase
                         )


if __name__ == "__main__":
    test_main()
|
lgpl-3.0
|
weizhenwei/qemu
|
scripts/qapi.py
|
66
|
19807
|
#
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import re
from ordereddict import OrderedDict
import os
import sys
# Type names that any schema may use without defining them.
builtin_types = [
    'str', 'int', 'number', 'bool',
    'int8', 'int16', 'int32', 'int64',
    'uint8', 'uint16', 'uint32', 'uint64'
]

# Map builtin type name -> QObject type code used by the generated
# visitors; note every fixed-width integer shares QTYPE_QINT.
builtin_type_qtypes = {
    'str':      'QTYPE_QSTRING',
    'int':      'QTYPE_QINT',
    'number':   'QTYPE_QFLOAT',
    'bool':     'QTYPE_QBOOL',
    'int8':     'QTYPE_QINT',
    'int16':    'QTYPE_QINT',
    'int32':    'QTYPE_QINT',
    'int64':    'QTYPE_QINT',
    'uint8':    'QTYPE_QINT',
    'uint16':   'QTYPE_QINT',
    'uint32':   'QTYPE_QINT',
    'uint64':   'QTYPE_QINT',
}
def error_path(parent):
    """Render the chain of includes leading to *parent* as
    'In file included from <file>:<line>:' lines, outermost include first."""
    trail = ""
    node = parent
    while node:
        trail = ("In file included from %s:%d:\n"
                 % (node['file'], node['line'])) + trail
        node = node['parent']
    return trail
class QAPISchemaError(Exception):
    """Lexical/syntax error, carrying file/line/column of the offending token."""

    def __init__(self, schema, msg):
        self.input_file = schema.input_file
        self.msg = msg
        self.col = 1
        self.line = schema.line
        # Compute the column by walking the current line up to the token.
        # NOTE(review): the tab formula '(col + 7) % 8 + 1' looks suspect
        # (it leaves col 1 unchanged for a leading tab) -- confirm against
        # upstream before relying on column numbers with tabs.
        for ch in schema.src[schema.line_pos:schema.pos]:
            if ch == '\t':
                self.col = (self.col + 7) % 8 + 1
            else:
                self.col += 1
        self.info = schema.parent_info

    def __str__(self):
        # Prefix the include chain (if any), then file:line:col: message.
        return error_path(self.info) + \
            "%s:%d:%d: %s" % (self.input_file, self.line, self.col, self.msg)
class QAPIExprError(Exception):
    """Semantic error attached to a parsed expression's source location."""

    def __init__(self, expr_info, msg):
        # expr_info is the {'file', 'line', 'parent'} dict built by QAPISchema.
        self.info = expr_info
        self.msg = msg

    def __str__(self):
        # Prefix the include chain so nested-include errors show their origin.
        return error_path(self.info['parent']) + \
            "%s:%d: %s" % (self.info['file'], self.info['line'], self.msg)
class QAPISchema:
    """Lexer/parser for a QAPI schema file.

    Construction reads and parses the whole input; the result is left in
    self.exprs as a list of {'expr': ..., 'info': ...} elements.  'include'
    directives are resolved recursively with cycle detection.
    NOTE(review): the mutable default arguments are shared across calls;
    previously_included relies on that deliberately to suppress duplicate
    includes across sibling parses.
    """

    def __init__(self, fp, input_relname=None, include_hist=[],
                 previously_included=[], parent_info=None):
        """ include_hist is a stack used to detect inclusion cycles
            previously_included is a global state used to avoid multiple
            inclusions of the same file"""
        input_fname = os.path.abspath(fp.name)
        if input_relname is None:
            input_relname = fp.name
        self.input_dir = os.path.dirname(input_fname)
        self.input_file = input_relname
        self.include_hist = include_hist + [(input_relname, input_fname)]
        previously_included.append(input_fname)
        self.parent_info = parent_info
        self.src = fp.read()
        # Guarantee a trailing newline so the lexer's EOF check works.
        if self.src == '' or self.src[-1] != '\n':
            self.src += '\n'
        self.cursor = 0
        self.line = 1
        self.line_pos = 0
        self.exprs = []
        self.accept()

        # One top-level expression per iteration until EOF (tok is None).
        while self.tok != None:
            expr_info = {'file': input_relname, 'line': self.line, 'parent': self.parent_info}
            expr = self.get_expr(False)
            if isinstance(expr, dict) and "include" in expr:
                # {'include': 'name'} splices another schema file in place.
                if len(expr) != 1:
                    raise QAPIExprError(expr_info, "Invalid 'include' directive")
                include = expr["include"]
                if not isinstance(include, str):
                    raise QAPIExprError(expr_info,
                                        'Expected a file name (string), got: %s'
                                        % include)
                include_path = os.path.join(self.input_dir, include)
                # Reject inclusion cycles.
                for elem in self.include_hist:
                    if include_path == elem[1]:
                        raise QAPIExprError(expr_info, "Inclusion loop for %s"
                                            % include)
                # skip multiple include of the same file
                if include_path in previously_included:
                    continue
                try:
                    fobj = open(include_path, 'r')
                except IOError, e:
                    raise QAPIExprError(expr_info,
                                        '%s: %s' % (e.strerror, include))
                exprs_include = QAPISchema(fobj, include, self.include_hist,
                                           previously_included, expr_info)
                self.exprs.extend(exprs_include.exprs)
            else:
                expr_elem = {'expr': expr,
                             'info': expr_info}
                self.exprs.append(expr_elem)

    def accept(self):
        """Advance to the next token.

        Sets self.tok to the token's first character (None at EOF) and
        self.val to the decoded value for string tokens."""
        while True:
            self.tok = self.src[self.cursor]
            self.pos = self.cursor
            self.cursor += 1
            self.val = None

            if self.tok == '#':
                # Comment: skip to end of line.
                self.cursor = self.src.find('\n', self.cursor)
            elif self.tok in ['{', '}', ':', ',', '[', ']']:
                return
            elif self.tok == "'":
                # Single-quoted string; a backslash escapes the next
                # character literally.
                string = ''
                esc = False
                while True:
                    ch = self.src[self.cursor]
                    self.cursor += 1
                    if ch == '\n':
                        raise QAPISchemaError(self,
                                              'Missing terminating "\'"')
                    if esc:
                        string += ch
                        esc = False
                    elif ch == "\\":
                        esc = True
                    elif ch == "'":
                        self.val = string
                        return
                    else:
                        string += ch
            elif self.tok == '\n':
                if self.cursor == len(self.src):
                    self.tok = None
                    return
                self.line += 1
                self.line_pos = self.cursor
            elif not self.tok.isspace():
                raise QAPISchemaError(self, 'Stray "%s"' % self.tok)

    def get_members(self):
        """Parse the members of a '{...}' object (opening brace already
        consumed) into an OrderedDict; duplicate keys are rejected."""
        expr = OrderedDict()
        if self.tok == '}':
            self.accept()
            return expr
        if self.tok != "'":
            raise QAPISchemaError(self, 'Expected string or "}"')
        while True:
            key = self.val
            self.accept()
            if self.tok != ':':
                raise QAPISchemaError(self, 'Expected ":"')
            self.accept()
            if key in expr:
                raise QAPISchemaError(self, 'Duplicate key "%s"' % key)
            expr[key] = self.get_expr(True)
            if self.tok == '}':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPISchemaError(self, 'Expected "," or "}"')
            self.accept()
            if self.tok != "'":
                raise QAPISchemaError(self, 'Expected string')

    def get_values(self):
        """Parse the elements of a '[...]' list (opening bracket already
        consumed)."""
        expr = []
        if self.tok == ']':
            self.accept()
            return expr
        if not self.tok in [ '{', '[', "'" ]:
            raise QAPISchemaError(self, 'Expected "{", "[", "]" or string')
        while True:
            expr.append(self.get_expr(True))
            if self.tok == ']':
                self.accept()
                return expr
            if self.tok != ',':
                raise QAPISchemaError(self, 'Expected "," or "]"')
            self.accept()

    def get_expr(self, nested):
        """Parse one expression; top-level (nested=False) expressions must
        be objects."""
        if self.tok != '{' and not nested:
            raise QAPISchemaError(self, 'Expected "{"')
        if self.tok == '{':
            self.accept()
            expr = self.get_members()
        elif self.tok == '[':
            self.accept()
            expr = self.get_values()
        elif self.tok == "'":
            expr = self.val
            self.accept()
        else:
            raise QAPISchemaError(self, 'Expected "{", "[" or string')
        return expr
def find_base_fields(base):
    """Return the 'data' members of the named base struct, or None if the
    struct has not been registered."""
    struct = find_struct(base)
    return struct['data'] if struct else None
# Return the discriminator enum define if discriminator is specified as an
# enum type, otherwise return None.
def discriminator_find_enum_define(expr):
    """Resolve a flat union's discriminator to its enum define, if any."""
    base = expr.get('base')
    discriminator = expr.get('discriminator')
    if not (base and discriminator):
        return None

    base_fields = find_base_fields(base)
    if not base_fields:
        return None

    discriminator_type = base_fields.get(discriminator)
    if not discriminator_type:
        return None
    return find_enum(discriminator_type)
def check_event(expr, expr_info):
    """Reject events whose arguments contain nested structures."""
    params = expr.get('data')
    if not params:
        return
    for argname, argentry, optional, structured in parse_args(params):
        if structured:
            raise QAPIExprError(expr_info,
                                "Nested structure define in event is not "
                                "supported, event '%s', argname '%s'"
                                % (expr['event'], argname))
def check_union(expr, expr_info):
    """Validate a 'union' expression: simple, anonymous, or flat form."""
    name = expr['union']
    base = expr.get('base')
    discriminator = expr.get('discriminator')
    members = expr['data']

    # If the object has a member 'base', its value must name a complex type.
    if base:
        base_fields = find_base_fields(base)
        if not base_fields:
            raise QAPIExprError(expr_info,
                                "Base '%s' is not a valid type"
                                % base)

    # If the union object has no member 'discriminator', it's an
    # ordinary union.
    if not discriminator:
        enum_define = None

    # Else if the value of member 'discriminator' is {}, it's an
    # anonymous union.
    elif discriminator == {}:
        enum_define = None

    # Else, it's a flat union.
    else:
        # The object must have a member 'base'.
        if not base:
            raise QAPIExprError(expr_info,
                                "Flat union '%s' must have a base field"
                                % name)
        # The value of member 'discriminator' must name a member of the
        # base type.
        discriminator_type = base_fields.get(discriminator)
        if not discriminator_type:
            raise QAPIExprError(expr_info,
                                "Discriminator '%s' is not a member of base "
                                "type '%s'"
                                % (discriminator, base))
        enum_define = find_enum(discriminator_type)
        # Do not allow string discriminator
        if not enum_define:
            raise QAPIExprError(expr_info,
                                "Discriminator '%s' must be of enumeration "
                                "type" % discriminator)

    # Check every branch
    for (key, value) in members.items():
        # If this named member's value names an enum type, then all members
        # of 'data' must also be members of the enum type.
        if enum_define and not key in enum_define['enum_values']:
            raise QAPIExprError(expr_info,
                                "Discriminator value '%s' is not found in "
                                "enum '%s'" %
                                (key, enum_define["enum_name"]))
        # Todo: add checking for values. Key is checked as above, value can be
        # also checked here, but we need more functions to handle array case.
def check_exprs(schema):
    """Run semantic checks over every parsed expression in *schema*.

    Raises QAPIExprError (via check_union/check_event) on the first
    violation found.
    """
    for expr_elem in schema.exprs:
        expr = expr_elem['expr']
        # 'in' instead of the deprecated dict.has_key(); behaves identically
        # on Python 2 and also works on Python 3.
        if 'union' in expr:
            check_union(expr, expr_elem['info'])
        if 'event' in expr:
            check_event(expr, expr_elem['info'])
def parse_schema(input_file):
    """Parse *input_file*, populate the global enum/struct/union registries,
    run semantic checks, and return the list of expressions.

    On error, prints to stderr and exits with status 1.
    NOTE: Python 2 only ('except E, e' and 'print >>' syntax).
    """
    try:
        schema = QAPISchema(open(input_file, "r"))
    except (QAPISchemaError, QAPIExprError), e:
        print >>sys.stderr, e
        exit(1)

    exprs = []

    # First pass: register every named type so later lookups succeed.
    for expr_elem in schema.exprs:
        expr = expr_elem['expr']
        if expr.has_key('enum'):
            add_enum(expr['enum'], expr['data'])
        elif expr.has_key('union'):
            add_union(expr)
        elif expr.has_key('type'):
            add_struct(expr)
        exprs.append(expr)

    # Try again for hidden UnionKind enum
    # (simple unions get an implicit '<Name>Kind' discriminator enum).
    for expr_elem in schema.exprs:
        expr = expr_elem['expr']
        if expr.has_key('union'):
            if not discriminator_find_enum_define(expr):
                add_enum('%sKind' % expr['union'])

    try:
        check_exprs(schema)
    except QAPIExprError, e:
        print >>sys.stderr, e
        exit(1)

    return exprs
def parse_args(typeinfo):
    """Yield (argname, argentry, optional, structured) for each member.

    typeinfo is either a struct name (resolved via find_struct) or a
    mapping of members; a leading '*' on a member name marks it optional,
    and an OrderedDict value marks it as a nested (structured) member.
    NOTE: uses basestring, so this is Python 2 only.
    """
    if isinstance(typeinfo, basestring):
        struct = find_struct(typeinfo)
        assert struct != None
        typeinfo = struct['data']

    for member in typeinfo:
        argname = member
        argentry = typeinfo[member]
        optional = False
        structured = False
        if member.startswith('*'):
            argname = member[1:]
            optional = True
        if isinstance(argentry, OrderedDict):
            structured = True
        yield (argname, argentry, optional, structured)
def de_camel_case(name):
    """Lower-case *name*, inserting '_' before interior capitals and
    mapping '-' to '_' (e.g. 'CamelCase' -> 'camel_case')."""
    out = []
    for ch in name:
        if ch.isupper() and out:
            out.append('_')
        if ch == '-':
            out.append('_')
        else:
            out.append(ch.lower())
    return ''.join(out)
def camel_case(name):
    """CamelCase *name*: '_'/'-' separate words, each word is capitalized
    and the rest lower-cased (e.g. 'foo_bar' -> 'FooBar')."""
    pieces = []
    word_start = True
    for ch in name:
        if ch in ('_', '-'):
            word_start = True
        elif word_start:
            pieces.append(ch.upper())
            word_start = False
        else:
            pieces.append(ch.lower())
    return ''.join(pieces)
def c_var(name, protect=True):
    """Map a QAPI name to a valid C identifier.

    Reserved words get a 'q_' prefix (unless protect is False); '-'
    becomes '_' and leading '*' markers are stripped.
    """
    # Reserved identifiers: C89 (ANSI X3J11/88-090 3.1.1), C99 6.4.1,
    # C11 6.4.1, GCC extensions (excluding _.*), C++03 2.11 including the
    # alternative tokens, plus names commonly polluted by system headers.
    reserved = set('''
        auto break case char const continue default do double else enum
        extern float for goto if int long register return short signed
        sizeof static struct switch typedef union unsigned void volatile
        while
        inline restrict _Bool _Complex _Imaginary
        _Alignas _Alignof _Atomic _Generic _Noreturn _Static_assert
        _Thread_local
        asm typeof
        bool catch class const_cast delete dynamic_cast explicit false
        friend mutable namespace new operator private protected public
        reinterpret_cast static_cast template this throw true try typeid
        typename using virtual wchar_t
        and and_eq bitand bitor compl not not_eq or or_eq xor xor_eq
        unix errno
    '''.split())
    if protect and name in reserved:
        return "q_" + name
    return name.replace('-', '_').lstrip("*")
def c_fun(name, protect=True):
    """Like c_var(), but additionally maps '.' to '_' so the result is a
    valid C function name."""
    identifier = c_var(name, protect)
    return identifier.replace('.', '_')
def c_list_type(name):
    """Name of the generated list type for elements of *name*."""
    return '{0}List'.format(name)
def type_name(name):
    """Map a schema type to its C type name.

    A one-element list denotes an array and maps to the generated list
    type; anything else is returned unchanged.
    """
    # isinstance() instead of 'type(name) == list': idiomatic, and also
    # accepts list subclasses.
    if isinstance(name, list):
        return c_list_type(name[0])
    return name
# Global registries of all type definitions seen so far; populated by the
# add_* helpers and queried by the find_* helpers below.
enum_types = []
struct_types = []
union_types = []
def add_struct(definition):
    """Register a struct ('type') expression for later find_struct() lookup."""
    global struct_types
    struct_types.append(definition)
def find_struct(name):
    """Look up a registered struct expression by its 'type' name, or None."""
    return next((s for s in struct_types if s['type'] == name), None)
def add_union(definition):
    """Register a union expression for later find_union() lookup."""
    global union_types
    union_types.append(definition)
def find_union(name):
    """Look up a registered union expression by its 'union' name, or None."""
    return next((u for u in union_types if u['union'] == name), None)
def add_enum(name, enum_values = None):
    """Register an enum; enum_values is None for implicit '<Name>Kind' enums."""
    global enum_types
    enum_types.append({"enum_name": name, "enum_values": enum_values})
def find_enum(name):
    """Look up a registered enum define by name, or None."""
    return next((e for e in enum_types if e['enum_name'] == name), None)
def is_enum(name):
    """True if *name* was previously registered via add_enum()."""
    # 'is not None' instead of '!= None' (PEP 8 identity comparison).
    return find_enum(name) is not None
# Sentinel appended to pointer C types so trailing spaces can be removed.
eatspace = '\033EATSPACE.'

# A special suffix is added in c_type() for pointer types, and it's
# stripped in mcgen(). So please notice this when you check the return
# value of c_type() outside mcgen().
def c_type(name, is_param=False):
    """Map a QAPI type name to its C type.

    Pointer-valued results carry the eatspace suffix (stripped later by
    mcgen()).  With is_param=True, 'str' maps to 'const char *'.
    """
    if name == 'str':
        if is_param:
            return 'const char *' + eatspace
        return 'char *' + eatspace
    elif name == 'int':
        return 'int64_t'
    # Membership test instead of the original eight-way or-chain.
    elif name in ('int8', 'int16', 'int32', 'int64',
                  'uint8', 'uint16', 'uint32', 'uint64'):
        return name + '_t'
    elif name == 'size':
        return 'uint64_t'
    elif name == 'bool':
        return 'bool'
    elif name == 'number':
        return 'double'
    # isinstance() instead of 'type(name) == list'.
    elif isinstance(name, list):
        return '%s *%s' % (c_list_type(name[0]), eatspace)
    elif is_enum(name):
        return name
    # 'is None' instead of '== None' (PEP 8); empty name also means void.
    elif name is None or len(name) == 0:
        return 'void'
    elif name == name.upper():
        # All-caps names denote events.
        return '%sEvent *%s' % (camel_case(name), eatspace)
    else:
        return '%s *%s' % (name, eatspace)
def is_c_ptr(name):
    """True if the C representation of *name* is a pointer type (detected
    via the eatspace suffix c_type() appends to pointers)."""
    return c_type(name).endswith("*" + eatspace)
def genindent(count):
    """Return a string of *count* spaces (empty for non-positive counts)."""
    # String repetition instead of the original character-append loop.
    return " " * count
# Current indentation depth used by cgen(); adjusted with push_indent()
# and pop_indent().
indent_level = 0

def push_indent(indent_amount=4):
    """Increase the global code-generation indent level."""
    global indent_level
    indent_level += indent_amount

def pop_indent(indent_amount=4):
    """Decrease the global code-generation indent level."""
    global indent_level
    indent_level -= indent_amount
def cgen(code, **kwds):
    """Indent every line of *code* by the current indent level, expand
    %-style keyword substitutions, and append a trailing newline."""
    indent = genindent(indent_level)
    # List comprehension instead of map(lambda ...): clearer, and behaves
    # the same on Python 2 and 3 (map returns an iterator on 3).
    lines = [indent + line for line in code.split('\n')]
    return '\n'.join(lines) % kwds + '\n'
def mcgen(code, **kwds):
    """Like cgen(), but drops the first and last lines of *code* (so
    callers can use readable triple-quoted templates) and removes the
    eatspace markers c_type() appends to pointer types."""
    raw = cgen('\n'.join(code.split('\n')[1:-1]), **kwds)
    return re.sub(re.escape(eatspace) + ' *', '', raw)
def basename(filename):
    """Return the final path component ('/' separators assumed)."""
    return filename.rsplit("/", 1)[-1]
def guardname(filename):
    """Derive an include-guard macro name ('FOO_BAR_H') from *filename*."""
    stem = basename(filename).rsplit(".", 1)[0]
    for separator in (".", " ", "-"):
        stem = stem.replace(separator, "_")
    return stem.upper() + '_H'
def guardstart(name):
    """Emit the opening #ifndef/#define pair of an include guard."""
    return mcgen('''
#ifndef %(name)s
#define %(name)s
''',
                 name=guardname(name))
def guardend(name):
    """Emit the closing #endif of an include guard."""
    return mcgen('''
#endif /* %(name)s */
''',
                 name=guardname(name))
# Examples of the mapping performed below:
#   ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1
#   ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2
#   ENUM24_Name -> ENUM24_NAME
def _generate_enum_string(value):
    """Upper-case *value*, inserting '_' at camel-case word boundaries."""
    c_fun_str = c_fun(value, False)
    if value.isupper():
        return c_fun_str

    pieces = []
    length = len(c_fun_str)
    for idx, ch in enumerate(c_fun_str):
        # An upper-case letter not already preceded by '_' starts a new
        # word when the next character is lower-case, or when it follows
        # a digit.
        if ch.isupper() and idx > 0 and c_fun_str[idx - 1] != "_":
            if (idx < length - 1 and c_fun_str[idx + 1].islower()) or \
                    c_fun_str[idx - 1].isdigit():
                pieces.append('_')
        pieces.append(ch)
    return ''.join(pieces).lstrip('_').upper()
def generate_enum_full_value(enum_name, enum_value):
    """Full C constant name for *enum_value* of *enum_name*: PREFIX_VALUE."""
    prefix = _generate_enum_string(enum_name)
    suffix = _generate_enum_string(enum_value)
    return "%s_%s" % (prefix, suffix)
|
gpl-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.