repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
goose3/goose3
goose3/extractors/authors.py
2
2390
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.

Python port was written by Xavier Grangier for Recrutae

Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with
the License.  You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from goose3.extractors import BaseExtractor


class AuthorsExtractor(BaseExtractor):
    """Extracts author names from the parsed article document."""

    def extract(self):
        """Return a de-duplicated list of author names.

        Walks every configured author pattern
        (``self.config.known_author_patterns``) and, for each matching
        element, pulls the name from one of three places: a nested
        sub-element's text, a tag attribute, or the element's own text
        content.
        """
        found = set()
        for pattern in self.config.known_author_patterns:
            nodes = self.parser.getElementsByTag(
                self.article.doc,
                attr=pattern.attr,
                value=pattern.value,
                tag=pattern.tag) or []
            for node in nodes:
                if pattern.subpattern:
                    # The author name lives in a nested element.
                    sub_nodes = self.parser.getElementsByTag(
                        node,
                        attr=pattern.subpattern.attr,
                        value=pattern.subpattern.value,
                        tag=pattern.subpattern.tag)
                    if sub_nodes:
                        found.add(self.parser.getText(sub_nodes[0]))
                elif pattern.tag is None:
                    # Pattern matched on an attribute only: the name is
                    # stored in the configured content attribute.
                    name = self.parser.getAttribute(node, pattern.content)
                    if name:
                        found.add(name)
                else:
                    found.add(node.text_content().strip())
        return list(found)
apache-2.0
norlanliu/proctrl
test/googletest/xcode/Scripts/versiongenerate.py
3088
4536
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A script to prepare version information for use in the gtest Info.plist file.

This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information.  The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.

This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
  1. The AC_INIT macro will be contained within the first 1024 characters
     of configure.ac
  2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
     segment represents the major version, the second represents the minor
     version and the third represents the fix version.
  3. No ")" character exists between the opening "(" and closing ")" of
     AC_INIT, including in comments and character strings.
"""

import re
import sys

# Only this many leading bytes of configure.ac are scanned for AC_INIT
# (assumption 1 above).
_BUFFER_SIZE = 1024

# Extracts three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")".  The non-greedy
# "*?" keeps the match from spanning everything between the first "(" and
# the last ")" in the buffer.
_VERSION_RE = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                         re.DOTALL)

# Template for the generated header; substituted with (major, minor,
# major, minor, fix).  The text is emitted verbatim into Version.h.
_FILE_TEMPLATE = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

"""


def main(argv):
    """Generate <output_dir>/Version.h from <input_dir>/configure.ac.

    Args:
      argv: command-line arguments; argv[1] is the directory containing
        configure.ac, argv[2] is the directory to write Version.h into.

    Returns:
      0 on success, 1 on a usage error or when no AC_INIT version string
      could be found.
    """
    if len(argv) < 3:
        print("Usage: versiongenerate.py input_dir output_dir")
        return 1
    input_dir = argv[1]
    output_dir = argv[2]

    # Read the first 1024 characters of the configure.ac file.
    with open("%s/configure.ac" % input_dir, "r") as config_file:
        opening_string = config_file.read(_BUFFER_SIZE)

    # Extract the version string from the AC_INIT macro.
    version_values = _VERSION_RE.search(opening_string)
    if version_values is None:
        # Previously this fell through to an AttributeError; fail with an
        # explicit message instead.
        print("Error: no AC_INIT version string found in configure.ac")
        return 1
    major_version = version_values.group(1)
    minor_version = version_values.group(2)
    fix_version = version_values.group(3)

    # Write the version information to a header file to be included in
    # the Info.plist file.
    file_data = _FILE_TEMPLATE % (major_version, minor_version,
                                  major_version, minor_version, fix_version)
    with open("%s/Version.h" % output_dir, "w") as version_file:
        version_file.write(file_data)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
gpl-3.0
emk/pyjamas
library/pyjamas/chart/Axis.py
3
80103
""" * Copyright 2007,2008,2009 John C. Gunther * Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net> * * Licensed under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http:#www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. * """ import time from pyjamas.ui.HTML import HTML from pyjamas.chart import NumberFormat from pyjamas.chart import DateTimeFormat from pyjamas.chart import Double from pyjamas.chart import TickLocation from pyjamas.chart import AnnotationLocation from pyjamas.chart import Annotation from pyjamas.chart.GChartConsts import NAI from pyjamas.chart.GChartConsts import DEFAULT_TICK_COUNT from pyjamas.chart.GChartConsts import DEFAULT_WIDGET_WIDTH_UPPERBOUND from pyjamas.chart.GChartConsts import DEFAULT_WIDGET_HEIGHT_UPPERBOUND from pyjamas.chart.GChartConsts import DEFAULT_TICK_LABEL_FONT_COLOR from pyjamas.chart.GChartConsts import DEFAULT_TICK_LABEL_FONTSIZE from pyjamas.chart.GChartConsts import DEFAULT_TICK_LABEL_FONT_STYLE from pyjamas.chart.GChartConsts import DEFAULT_TICK_LABEL_FONT_WEIGHT from pyjamas.chart.GChartConsts import DEFAULT_TICK_LABEL_FORMAT from pyjamas.chart.GChartConsts import DEFAULT_TICK_LENGTH from pyjamas.chart.GChartConsts import DEFAULT_TICK_THICKNESS from pyjamas.chart.GChartConsts import Y2TICKS_ID from pyjamas.chart.GChartConsts import Y2GRIDLINES_ID from pyjamas.chart.GChartConsts import Y2AXIS_ID from pyjamas.chart.GChartConsts import YTICKS_ID from pyjamas.chart.GChartConsts import YGRIDLINES_ID from pyjamas.chart.GChartConsts import YAXIS_ID from pyjamas.chart.GChartConsts import XTICKS_ID 
from pyjamas.chart.GChartConsts import XGRIDLINES_ID from pyjamas.chart.GChartConsts import XAXIS_ID from pyjamas.chart.GChartConsts import TICK_CHARHEIGHT_TO_FONTSIZE_LOWERBOUND from pyjamas.chart.GChartConsts import TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND from pyjamas.chart.GChartConsts import Y_AXIS from pyjamas.chart.GChartConsts import Y2_AXIS from pyjamas.chart.GChartUtil import htmlHeight, htmlWidth # these are used in formatting tick positions into tick labels: NUMBER_FORMAT_TYPE = 0 DATE_FORMAT_TYPE = 1 LOG10INVERSE_FORMAT_TYPE = 2 LOG2INVERSE_FORMAT_TYPE = 3 """* ** Represents an axis of the chart, for example, the x, ** y, or y2 axis. An axis consists of the axis itself, ** along with its tick marks, tick labels and gridlines. ** ** @see XAxis XAxis ** @see YAxis YAxis ** @see Y2Axis Y2Axis ** @see #getXAxis getXAxis ** @see #getYAxis getYAxis ** @see #getY2Axis getY2Axis ** ** *""" class AxisLimits: def __init__(self, min, max): self.min = min self.max = max def equals(self, al): return (al.min == min and al.max == max) class Axis: def __init__(self, chart): self.chart = chart self.tickLocation = TickLocation.DEFAULT_TICK_LOCATION self.numberFormat = NumberFormat.getFormat(DEFAULT_TICK_LABEL_FORMAT) self.dateFormat = DateTimeFormat.getShortDateTimeFormat() self.tickLabelFormatType = NUMBER_FORMAT_TYPE self.nCurvesVisibleOnAxis = 0; # # of developer curves on axis. # (count does not include system or # invisible curves) # different initial curr, prev ==> "limits have changed" state self.currentLimits = AxisLimits( Double.MAX_VALUE, -Double.MAX_VALUE) self.previousLimits = AxisLimits( -Double.MAX_VALUE, Double.MAX_VALUE) self.axisLabel = None self.axisLabelThickness = NAI self.hasGridlines = False self.tickCount = DEFAULT_TICK_COUNT # axes auto-scale whenever min or max are NaN. 
self.axisMax = Double.NaN self.axisMin = Double.NaN # this symbol facilitates rendering of gridlines & axes self.tickLabelFontColor = DEFAULT_TICK_LABEL_FONT_COLOR # In CSS font-size pixels. These define the height of each # character; our code relies on the rule of thumb that # character width is approximately 3/5th this height to # obtain a reasonably tight upper bound on tick label widths. self.tickLabelFontSize = DEFAULT_TICK_LABEL_FONTSIZE self.tickLabelFontStyle = DEFAULT_TICK_LABEL_FONT_STYLE self.tickLabelFontWeight = DEFAULT_TICK_LABEL_FONT_WEIGHT self.tickLabelFormat = DEFAULT_TICK_LABEL_FORMAT self.tickLabelThickness = NAI self.tickLabelPadding = 0 self.ticksPerLabel = 1 self.ticksPerGridline = 1 self.tickLength = DEFAULT_TICK_LENGTH # this symbol facilitates rendering of labeled tick-marks self.tickThickness = DEFAULT_TICK_THICKNESS # is axis itself visible (has no impact ticks or their labels) self.axisVisible = True def getChart(self): return self.chart def getSystemCurve(self, idx): return self.chart.getSystemCurve(idx) def incrementCurves(self): self.nCurvesVisibleOnAxis += 1 def decrementCurves(self): self.nCurvesVisibleOnAxis -= 1 # adds a labeled tick mark via this Axis' special system tick curve def addTickAsPoint(self, tickPosition, tickLabel, tickWidget, widthUpperBound, heightUpperBound): c = self.getSystemCurve(self.ticksId) if self.isHorizontalAxis: c.addPoint(tickPosition, self.axisPosition*Double.MAX_VALUE) else: c.addPoint(self.axisPosition*Double.MAX_VALUE, tickPosition) # unlabeled tick--we are done, so return to save time if None == tickLabel and None == tickWidget: return #add an annotation representing the tick label p = c.getPoint() if self.isHorizontalAxis: # below tick on X, above it on (the future) X2 p.setAnnotationLocation( (self.axisPosition < 0) and AnnotationLocation.SOUTH or AnnotationLocation.NORTH) if self.tickLabelPadding != 0: # padding < 0 is rare but allowed p.setAnnotationYShift(self.axisPosition*tickLabelPadding) # 
else stick with default of 0 y-shift else: # to left of tick mark on Y, to right of it on Y2 p.setAnnotationLocation( (self.axisPosition < 0) and AnnotationLocation.WEST or AnnotationLocation.EAST) if self.tickLabelPadding != 0: p.setAnnotationXShift(self.axisPosition*self.tickLabelPadding) # else stick with default of 0 x-shift if None != tickLabel: p.setAnnotationText(tickLabel, widthUpperBound, heightUpperBound) elif None != tickWidget: p.setAnnotationWidget(tickWidget, widthUpperBound, heightUpperBound) p.setAnnotationFontSize(self.getTickLabelFontSize()) p.setAnnotationFontStyle(self.getTickLabelFontStyle()) p.setAnnotationFontColor(self.getTickLabelFontColor()) p.setAnnotationFontWeight(self.getTickLabelFontWeight()) """* * Adds a tick at the specified position with the specified * label on this axis, whose width and height are within * the specified upper-bounds. * * <p> * Note that explicitly adding a single tick via this method * will eliminate any auto-generated ticks associated with the * <tt>setTickCount</tt> method. * * <p> * Use this method to specify unusually spaced * tick marks with labels that do not directly * reflect the position (for example, for a logarithmic axis, * or for a bar chart with special keyword-type labels, or * a time axis that places date and time on two separate lines). * * @param tickPosition the position, in model units, along * this axis at which the tick is displayed. * For example, if the axis range goes from 0 to 1, * a tick at position 0.5 would appear in the middle of * the axis. * * @param tickLabel the label for this tick. HTML is * supported in tick labels, but it must be prefixed by * <tt>&lt;html&gt</tt>. See the {@link * Curve.Point#setAnnotationText(String,int,int) * setAnnotationText} method for more information. * * @param widthUpperBound an upper bound on the width of * the text or HTML, in pixels. Use <tt>NAI</tt> to * get GChart to estimate this width for you. 
See the * <tt>setAnnotationText</tt> method for more information. * * @param heightUpperBound an upper bound on the height of * the text or HTML, in pixels. Use <tt>NAI</tt> to * get GChart to estimate this height for you. See the * <tt>setAnnotationText</tt> method for more information. * * @see #clearTicks clearTicks * @see #addTick(double) addTick(double) * @see #addTick(double,String) addTick(double,String) * @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) * @see #setTickCount setTickCount * @see #setTickLabelFormat setTickLabelFormat * @see #setTickLabelFontSize setTickLabelFontSize * @see #setTickLabelFontStyle setTickLabelFontStyle * @see #setTickLabelFontColor setTickLabelFontColor * @see #setTickLabelFontWeight setTickLabelFontWeight * @see Curve.Point#setAnnotationText(String,int,int) * setAnnotationText * @see Curve.Point#setAnnotationWidget setAnnotationWidget * """ def _addTickLabel(self, tickPosition, tickLabel, widthUpperBound, heightUpperBound): self.chartDecorationsChanged = True if NAI != self.tickCount: # clear out any auto-generated ticks cTicks = self.getSystemCurve(self.ticksId) cTicks.clearPoints() self.tickCount = NAI self.addTickAsPoint(tickPosition, tickLabel, None, widthUpperBound, heightUpperBound) def addTick(self, tickPosition, tickWidget=None, widthUpperBound=None, heightUpperBound=None): """* * Adds a widget-defined tick label at the specified * position, whose width and height are within * the specified upper-bounds. * * @param tickPosition the position, in model units, along * this axis at which the tick is displayed. * For example, if the axis range goes from 0 to 1, * a tick at position 0.5 would appear in the middle of * the axis. * * @param tickWidget the label for this tick, as defined * by any GWT Widget. * * @param widthUpperBound an upper bound on the width of * the widget, in pixels. If this and the next * parameter are omitted, GChart will use * <tt>DEFAULT_WIDGET_WIDTH_UPPERBOUND</tt>. 
* * @param heightUpperBound an upper bound on the height of * the widget, in pixels. If this and the previous * parameter are omitted, GChart will use <tt> * DEFAULT_WIDGET_HEIGHT_UPPERBOUND</tt> * * @see #addTick(double,Widget) addTick(double,Widget) * @see #addTick(double,String,int,int) addTick(double,String,int,int) * @see Curve.Point#setAnnotationWidget setAnnotationWidget * @see #DEFAULT_WIDGET_WIDTH_UPPERBOUND DEFAULT_WIDGET_WIDTH_UPPERBOUND * @see #DEFAULT_WIDGET_HEIGHT_UPPERBOUND DEFAULT_WIDGET_HEIGHT_UPPERBOUND *""" if tickWidget is None: tiickWidget = self.formatAsTickLabel(tickPosition) if isinstance(tickWidget, str): if widthUpperBound is None and heightUpperBound is None: widthUpperBound = NAI heightUpperBound = NAI self._addTickLabel(tickPosition, tickWidget, widthUpperBound, heightUpperBound) return if widthUpperBound is None and heightUpperBound is None: widthUpperBound = DEFAULT_WIDGET_WIDTH_UPPERBOUND heightUpperBound = DEFAULT_WIDGET_HEIGHT_UPPERBOUND self.chartDecorationsChanged = True if NAI != self.tickCount: # clear out any auto-generated ticks cTicks = self.getSystemCurve(self.ticksId) cTicks.clearPoints() self.tickCount = NAI self.addTickAsPoint(tickPosition, None, tickWidget, widthUpperBound, heightUpperBound) def clearTicks(self): """* * * Removes all ticks from this axis. Specifically, * erases any ticks that were explicitly specified via * <tt>addTick</tt>, and also sets the tick count to 0. 
* <p> * * @see #setTickCount setTickCount * @see #addTick(double) addTick(double) * @see #addTick(double,String) addTick(double,String) * @see #addTick(double,String,int,int) addTick(double,String,int,int) * @see #addTick(double,Widget) addTick(double,Widget) * @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) * """ self.chartDecorationsChanged = True self.tickCount = NAI c = self.getSystemCurve(self.ticksId) c.clearPoints() def clientToModel(self, clientCoordinate): """* * Converts a pixel, client-window coordinate position along this * axis into the model units associated with this axis. * * @param clientCoordinate a pixel-based coordinate that defines * the dimension associated with this axis in the standard * client window coordinates of GWT. * * @return the location defined by the client-coordinate argument, * but converted into the model units associated * with this axis. * * @see #getMouseCoordinate getMouseCoordinate * @see #modelToClient modelToClient * @see #pixelToModel pixelToModel * @see #modelToPixel modelToPixel * * """ pass def formatAsTickLabel(self, value): """* * * Applies this axis' tick label format to format a given value. * * @return the value formated as per this axis' currently specified * tick label format. * * @see #setTickLabelFormat(String) setTickLabelFormat * """ result = None if self.tickLabelFormatType == DATE_FORMAT_TYPE: #Date transDate = Date((long) value) transDate = time.gmtime(value) result = self.dateFormat.format(transDate) elif self.tickLabelFormatType == LOG10INVERSE_FORMAT_TYPE: value = pow(10., value) result = self.numberFormat.format(value) elif self.tickLabelFormatType == LOG2INVERSE_FORMAT_TYPE: value = pow(2., value) result = self.numberFormat.format(value) else: result = self.numberFormat.format(value) return result def formatNumberAsTickLabel(self, value): """* * @deprecated * * Equivalent to the better-named formatAsTickLabel. 
* <p> * * @see #formatAsTickLabel formatAsTickLabel * """ return self.formatAsTickLabel(value) def getAxisLabel(self): """* Returns the previously specified label of this axis. ** ** @return the Widget used as the label of this axis ** ** @see #setAxisLabel setAxisLabel ** """ return self.axisLabel def getAxisLabelThickness(self): """* Returns the thickness of the axis-label-holding region ** adjacent to the region allocated for this axis' tick labels. ** <p> ** ** Note that if the axis label is <tt>None</tt> (the ** default) then this method always returns 0, since ** in that case no rectangular region will be allocated ** for the axis label. ** <p> ** ** @return the thickness of the axis-label-holding ** region, in pixels. ** ** @see #setAxisLabelThickness setAxisLabelThickness ** """ result = 0 # Base class implementation is for y axes (x-axis will override). EXTRA_CHARWIDTH = 2; # 1-char padding on each side DEF_CHARWIDTH = 1; # when widget has no text if None == self.getAxisLabel(): result = 0 elif NAI != self.axisLabelThickness: result = self.axisLabelThickness elif hasattr(self.getAxisLabel(), 'getHTML'): charWidth = htmlWidth( self.getAxisLabel().getHTML()) result = int ( round((charWidth + EXTRA_CHARWIDTH) * self.getTickLabelFontSize() * TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND)) elif hasattr(self.getAxisLabel(), "getText"): text = self.getAxisLabel().getText() result = int (round((EXTRA_CHARWIDTH + (text and len(text) or 0)) * self.getTickLabelFontSize() * TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND)) else: # non-text widget. Not a clue, just use def width result = int ( round( (DEF_CHARWIDTH + EXTRA_CHARWIDTH) * self.getTickLabelFontSize() * TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND) ) return result def getAxisMax(self): """* ** Returns the maximum value displayed on this axis. 
** If the explicitly specified maximum value is ** undefined (<tt>Double.NaN</tt>) the maximum value returned ** by this function is calculated as the maximum of ** all of the values either displayed on this axis via ** points on a curve, or explicitly specified via tick ** positions. ** ** @return maximum value visible on this axis, in ** "model units" (arbitrary, application-specific, ** units) ** ** @see #setAxisMax setAxisMax ** @see #getDataMin getDataMin ** @see #getDataMax getDataMax *""" if not (Double.NaN==(self.axisMax)): return self.axisMax elif NAI != self.tickCount: return self.getDataMax() else: return max(self.getDataMax(), self.getTickMax()) def getAxisMin(self): """* ** ** Returns the minimum value displayed on this axis. ** If the minimum value is undefined (<tt>Double.NaN</tt>) the ** minimum value returned by this function is the ** minimum of all of the values either displayed on ** this axis via points on a curve, or explicitly specified ** via tick positions. ** ** @return minimum value visible on this axis, in ** "model units" (arbitrary, application-specific, ** units) ** ** @see #setAxisMin setAxisMin *""" if not (Double.NaN==(self.axisMin)): return self.axisMin; # explicitly set elif NAI != self.tickCount: return self.getDataMin() else: return min(self.getDataMin(), self.getTickMin()) def getAxisVisible(self): """* Is axis line visible on the chart? Note that ** this property only determines the visibility of the axis line ** itself. It does not control the visibility of the ** tick marks or tick labels along this axis. ** <p> ** ** @return True if the axis line is visible, False otherwise. ** ** @see #setAxisVisible setAxisVisible ** *""" return self.axisVisible def getDataMax(self): """* Returns the maximum data value associated with values ** represented on this axis. For example, for the left ** y-axis, this would be the largest y-value of all points ** contained in curves that are displayed on the left y-axis. 
** ** @return the maximum value associated with values ** mapped onto this axis. ** ** @see #getDataMin getDataMin ** @see #getAxisMax getAxisMax ** @see #getAxisMin getAxisMin ** """ pass def getDataMin(self): """* Returns the minimum data value associated with values ** represented on this axis. For example, for the left ** y-axis, this would be the smallest y-value of all points ** contained in curves that are displayed on the left y-axis. ** ** @return the minimum value associated with values ** mapped onto this axis. ** ** @see #getDataMax getDataMax ** @see #getAxisMax getAxisMax ** @see #getAxisMin getAxisMax ** """ pass def getHasGridlines(self): """* Returns the gridline setting previously made with ** <tt>setHasGridlines</tt>. ** ** @return True if gridlines have been enabled, False if not. ** ** @see #setHasGridlines setHasGridlines ** *""" return hasGridlines def getMouseCoordinate(self): """* * Returns the coordinate along this axis that * is associated with the last "GChart-tracked" mouse * location. * * @return the coordinate, projected along this axis, in * the scale defined by this axis, representing the * position GChart has currently "tracked" the mouse to, * or <tt>Double.NaN</tt> if GChart has tracked the mouse * right off the edge of the chart. * * @see #clientToModel clientToModel * @see #modelToClient modelToClient * @see #pixelToModel pixelToModel * @see #modelToPixel modelToPixel * @see GChart#setHoverTouchingEnabled setHoverTouchingEnabled * """ pass def getNCurvesVisibleOnAxis(self): """* * Returns the number of visible curves displayed on this axis. * <p> * * @return the number of visible curves on this axis, or <tt>0</tt> if * there are no visible curves on this axis. * * @see Axis#setVisible setVisible * """ return self.nCurvesVisibleOnAxis def getTickCount(self): """* ** Returns the number of ticks on this axis. ** ** @return the number of ticks on this axis. 
** ** @see #setTickCount setTickCount ** @see #addTick(double) addTick(double) ** @see #addTick(double,String) addTick(double,String) ** @see #addTick(double,String,int,int) addTick(double,String,int,int) ** @see #addTick(double,Widget) addTick(double,Widget) ** @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) ** @see #clearTicks clearTicks ** *""" result = self.tickCount if NAI == self.tickCount: c = self.getSystemCurve(self.ticksId) result = c.getNPoints() return result def getTickLabelFontWeight(self): """* ** Returns the CSS font-weight specification to be used ** by this axis' tick labels. ** ** @return font-weight of this axis' tick labels ** ** @see #setTickLabelFontWeight setTickLabelFontWeight *""" return self.tickLabelFontWeight def getTickLabelFontColor(self): """* ** Returns the color of the font used to display the ** text of the tick labels on this axis. ** ** ** @return CSS color string defining the color of the text of ** the tick labels for this axis. ** ** @see #setTickLabelFontColor setTickLabelFontColor ** ** @see #DEFAULT_TICK_LABEL_FONT_COLOR DEFAULT_TICK_LABEL_FONT_COLOR ** ** ** *""" return self.tickLabelFontColor def getTickLabelFontStyle(self): """* ** Returns the font-style of the font used to render tick ** labels on this axis (typically either "italic" or ** "normal") ** ** @return the CSS font-style in which tick labels of this axis ** are rendered. ** ** @see #setTickLabelFontStyle setTickLabelFontStyle *""" return self.tickLabelFontStyle def getTickLabelFontSize(self): """* Returns the CSS font size, in pixels, used for tick labels ** on this axis. ** ** @return the tick label font size in pixels ** ** @see #setTickLabelFontSize setTickLabelFontSize *""" return self.tickLabelFontSize def getTickLabelFormat(self): """* ** Returns the tick label numeric format string for this ** axis. ** ** @return numeric format used to generate tick labels. 
** ** @see #setTickLabelFormat setTickLabelFormat ** *""" return self.tickLabelFormat def getTickLabelPadding(self): """* ** Returns the amount of padding (blank space) between the ** ticks and their labels.<p> ** ** @return amount of padding between ticks and their labels, ** in pixels. ** ** @see #setTickLabelPadding setTickLabelPadding ** *""" return self.tickLabelPadding # Does real work of getTickLabelThickness; flag saves time # during repeated calls made in updateChartDecorations. def getTickLabelThickness(self, needsPopulation=True): maxLength = 0 if self.tickLabelThickness != NAI: result = self.tickLabelThickness else: # use an heuristic to estimate thickness if needsPopulation: self.maybePopulateTicks() c = self.getSystemCurve(self.ticksId) nTicks = c.getNPoints() for i in range(nTicks): tt = c.getPoint(i).getAnnotationText() if None != tt: maxLength = max(maxLength, Annotation.getNumberOfCharsWide(tt)) result = int (round(maxLength * self.tickLabelFontSize * TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND)) return result def getTicksPerGridline(self): """* ** Returns the ratio of the number of ticks to the number of ** ticks that have an associated gridline. ** ** @return number of ticks per gridline for this axis ** ** @see #setTicksPerGridline setTicksPerGridline ** *""" return self.ticksPerGridline def getTicksPerLabel(self): """* ** Returns the ratio of the number of ticks to the number of ** labeled ticks. ** ** @return number of ticks per label. ** ** @see #setTicksPerLabel setTicksPerLabel ** *""" return self.ticksPerLabel def getTickLength(self): """* * Returns the length of ticks for this axis. * * @return the length of this axis' ticks, in pixels. 
* * @see #setTickLength setTickLength """ return self.tickLength # GChart adds a pixel to even, centered, tick lengths (only # odd-length HTML ticks can be exactly centered on 1px axis) def getActualTickLength(self): result = self.tickLength if (TickLocation.CENTERED == self.tickLocation and 0 == (self.tickLength % 2) and self.tickLength > 0): result += 1 return result def getTickLocation(self): """* * Returns relative location of ticks on this axis. * <p> * * @see #setTickLocation setTickLocation * * @return <tt>TickLocation.INSIDE</tt>, * <tt>TickLocation.OUTSIDE</tt>, or * <tt>TickLocation.CENTERED</tt> * """ return self.tickLocation def getTickSpace(self): """* Returns the amount of space along the axis reserved for * the tick marks themselves, in pixels. * <p> * * This equals the length of * the part of the tick that is outside of the plot area. * * @see #setTickLength setTickLength * @see #setTickLabelPadding setTickLabelPadding * @see #setTickLocation setTickLocation * * @return the space GChart will allocate just outside the * axis to hold any tick marks. * """ if TickLocation.CENTERED == self.tickLocation: result = (self.tickLength+1)/2; # round up to nearest pixel elif TickLocation.OUTSIDE == self.tickLocation: result = self.tickLength else: # INSIDE result = 0 return result def getTickThickness(self): """* * Returns the thickness of ticks for this axis. * * @return the thickness of this axis' ticks, in pixels. * * @see #setTickThickness setTickThickness * @see #getTickLength getTickLength """ return tickThickness def modelToClient(self, modelCoordinate): """* * Converts a coordinate position in the model units associated * with this axis into a corresponding coordinate position * expressed in standard GWT client-window pixel coordinates. * @param modelCoordinate the position along this axis defined * in the model units associated with this axis. 
        *
        * @return a pixel-based coordinate that defines
        * the position associated with the argument in the standard
        * pixel, client window, coordinates of GWT.
        *
        * @see #getMouseCoordinate getMouseCoordinate
        * @see #clientToModel clientToModel
        * @see #pixelToModel pixelToModel
        * @see #modelToPixel modelToPixel
        *
        """
        # Abstract: overridden by the XAxis (and presumably YAxis)
        # subclasses with the concrete model -> client-window transform.
        pass

    def modelToPixel(self, modelCoordinate):
        """Convert *modelCoordinate* (model units of this axis) into
        GChart's decorated-chart pixel coordinates: the distance, in
        pixels, from the left edge (x axis) or top edge (y or y2 axis)
        of the decorated chart to the given position on this axis.

        @see #getMouseCoordinate, #clientToModel, #modelToClient,
        @see #modelToPlotAreaPixel, #pixelToModel
        """
        # Abstract: overridden by subclasses (see XAxis.modelToPixel).
        pass

    def modelToPlotAreaPixel(self, modelCoordinate):
        """Convert *modelCoordinate* (model units of this axis) into
        plot-area pixel coordinates: origin at the upper-left corner of
        the plot area, x growing rightward, y growing downward.

        The plot area is the rectangular region bounded by the chart's
        axes, sized via setChartSize, where curves are typically
        displayed.  Apart from the shifted origin this works just like
        modelToPixel; see that method for details and restrictions.

        @param modelCoordinate a position on this axis expressed in the
            model units associated with this axis.
        @return the distance, in pixels, from the left (x axis) or top
            (y or y2 axis) edge of the plot area to that position.

        @see #getMouseCoordinate, #plotAreaPixelToModel,
        @see #modelToPixel, #setChartSize
        """
        # Abstract: overridden by subclasses.
        pass

    def pixelToModel(self, pixelCoordinate):
        """Convert a decorated-chart pixel coordinate into the model
        units associated with this axis (the inverse of modelToPixel).

        Decorated-chart pixel coordinates have their origin at the
        upper-left corner of the decorated GChart and are related to
        GWT client-window coordinates via plotPanel.getAbsoluteLeft/Top
        and Window.getScrollLeft/Top.

        The model/pixel mapping is as of the last update(); before the
        first update() this returns Double.NaN.  Unlike clientToModel
        and modelToClient, the chart does NOT have to be rendered in
        the browser for this method to be usable.

        Tip: to use the mapping before the first real update, set this
        axis' limits explicitly via setAxisMin/setAxisMax and call
        update() while the chart is still empty (fast).

        @param pixelCoordinate the distance, in pixels, from the left
            (x axis) or top (y or y2 axis) edge of the decorated chart
            to a point on this axis.
        @return that position expressed in the model units associated
            with this axis.

        @see #getMouseCoordinate, #clientToModel, #modelToClient,
        @see #modelToPixel, #plotAreaPixelToModel
        """
        # Abstract: overridden by subclasses (see XAxis.pixelToModel).
        pass

    def plotAreaPixelToModel(self, pixelCoordinate):
        """Convert a plot-area pixel coordinate into the model units
        associated with this axis.  Apart from the shifted origin this
        works just like pixelToModel; see that method for details.

        @param pixelCoordinate the distance, in pixels, from the left
            (x axis) or top (y or y2 axis) edge of the plot area to a
            point on this axis.
        @return that position expressed in the model units associated
            with this axis.

        @see #modelToPlotAreaPixel, #pixelToModel, #setChartSize
        """
        # Abstract: overridden by subclasses.
        pass

    def setAxisLabel(self, axisLabel):
        """Specify the label of this axis.  The label is positioned
        just outside of, and centered lengthwise on, the region that
        GChart reserves for this axis' tick labels.

        @param axisLabel a Widget (or a plain HTML string, which is
            wrapped in an HTML widget) to use as the label of this axis.

        @see #getAxisLabel, #setTickLabelThickness,
        @see #setAxisLabelThickness
        """
        if isinstance(axisLabel, str):
            axisLabel = HTML(axisLabel)
        self.axisLabel = axisLabel
        self.chartDecorationsChanged = True

    """* Sets the thickness of the axis-label-holding region
    ** adjacent to the region allocated for tick labels.<p>
    **
    ** The axis label widget will be centered in this region.
    ** Choose a thickness large enough to hold the largest
    ** font size you want users to be able to zoom up to
    ** without the axis label spilling over into
    ** adjacent regions.
** <p> ** ** If the axis label thickness is <tt>NAI</tt> (the ** default), and the widget defining the axis label ** implements <tt>HasHTML</tt> (or <tt>HasText</tt>) then ** GChart uses a thickness based on the estimated number of ** non-tag characters in the first <tt>&lt;br&gt;</tt> or ** <tt>&lt;li&gt;</tt> ** delimited line for y-axis labels, and based on the ** estimated number of (<tt>&lt;br&gt;</tt> or ** <tt>&lt;li&gt;</tt> delimited) ** text lines for x-axis labels.<p> ** ** Note that if the axis label is <tt>None</tt> (its ** default setting) then no space is allocated for the axis ** label, regardless of this thickness setting. ** <p> ** ** @param thickness the thickness of the axis-label-holding ** region, in pixels, or <tt>NAI</tt> to use ** GChart's character-based default thickness estimates. ** ** @see #getAxisLabelThickness getAxisLabelThickness ** @see #setAxisLabel setAxisLabel """ def setAxisLabelThickness(self, thickness): axisLabelThickness = thickness self.chartDecorationsChanged = True """* ** Specifies the maximum value visible on this axis. ** <p> ** ** Aspects of the chart rendered beyond this maximum will ** be clipped if the chart's <tt>clipToPlotArea</tt> ** property is <tt>True</tt>. ** ** <p> ** ** If <tt>Double.NaN</tt> is specified, this maximum ** is auto-determined as described in <tt>getAxisMax</tt>. ** ** <p> <i>Performance tip:</i> Using auto-determined axis ** limits (via <tt>Double.NaN</tt>) forces GChart, at the ** next update, to re-render many chart elements whenever ** the min or max data value displayed on this axis ** changes. These (often expensive) re-renderings can be ** avoided by using explicitly specified axis limits ** whenever possible. <p> ** ** @param max maximum value visible on this axis, in "model units" ** (arbitrary, application-specific, units) or <tt>Double.NaN</tt> ** (the default value) to use an auto-determined maximum. 
    **
    ** @see #getAxisMax getAxisMax
    ** @see #getDataMin getDataMin
    ** @see #getDataMax getDataMax
    ** @see GChart#setClipToPlotArea setClipToPlotArea
    **
    *"""
    def setAxisMax(self, max):
        # max can change tick/axis label widths ==> changes position of
        # the plot area, hence the decorations flag
        self.chartDecorationsChanged = True
        self.axisMax = max

    def setAxisMin(self, min):
        """Specify the minimum value visible on this axis, in "model
        units" (arbitrary, application-specific units), or Double.NaN
        (the default) for an auto-determined minimum (see getAxisMin).
        Content rendered before this minimum is clipped when the
        chart's clipToPlotArea property is True.

        Performance tip: auto-determined limits (Double.NaN) force
        many chart elements to be re-rendered whenever the min or max
        data value changes; prefer explicit limits when possible.

        @see #getAxisMin, #getDataMin, #getDataMax
        """
        # min can change axis label width ==> changes position of plot area
        self.chartDecorationsChanged = True
        self.axisMin = min

    def setAxisVisible(self, axisVisible):
        """Define if this axis is visible.  Only the visibility of the
        axis line itself is controlled; tick marks and tick labels are
        not affected.

        Tip: ticks can be hidden via setTickThickness(0); tick labels
        can be hidden by setting their font color to the chart's
        background color via setTickLabelFontColor.

        @param axisVisible False to hide axis, True to show it.

        @see #setTickThickness, #setTickLabelFontColor,
        @see #getAxisVisible
        """
        self.chartDecorationsChanged = True
        self.axisVisible = axisVisible

    def setHasGridlines(self, hasGridlines):
        """Specify if this axis should have gridlines.  When on, tick
        marks with indexes 0, N, 2*N, ... (N = this axis'
        ticksPerGridline property) are in effect extended across the
        entire chart.

        @param hasGridlines True to display gridlines, False (the
            default) to not display them.

        @see #getHasGridlines, #setTicksPerGridline
        """
        self.chartDecorationsChanged = True
        self.hasGridlines = hasGridlines

    def setTickCount(self, tickCount):
        """Set the number of ticks placed on this axis (default 10).
        Ticks are always evenly spaced across the entire axis unless
        explicitly specified via addTick; setting the tick count
        overrides (erases) any ticks explicitly added via addTick.

        @param tickCount the number of ticks for this axis.

        @see #getTickCount, the addTick overloads,
        @see #setTickLabelFormat and the setTickLabelFont* setters
        """
        self.chartDecorationsChanged = True
        self.getSystemCurve(self.ticksId).clearPoints(); # eliminate user specified ticks
        self.tickCount = tickCount

    def setTickLabelFontWeight(self, cssWeight):
        """Specify the CSS font-weight (bold, normal, light, 100, 200,
        ... 900) used in this axis' tick labels.

        @see #getTickLabelFontWeight, the other setTickLabelFont*
        setters, #DEFAULT_TICK_LABEL_FONT_WEIGHT
        """
        self.chartDecorationsChanged = True
        # assure that any existing ticks are updated with weight
        c = self.getSystemCurve(self.ticksId)
        nPoints = c.getNPoints()
        for i in range(nPoints):
            c.getPoint(i).setAnnotationFontWeight(cssWeight)
        self.tickLabelFontWeight = cssWeight

    def setTickLabelFontColor(self, cssColor):
        """Specify the color of the font used to render tick labels for
        this axis, in standard CSS format (for details on CSS color
        specifications see Symbol.setBackgroundColor).

        @see #getTickLabelFontColor and the other setTickLabelFont*
        setters
        """
        self.chartDecorationsChanged = True
        # keep annotations of any already-created ticks in sync
        c = self.getSystemCurve(self.ticksId)
        nPoints = c.getNPoints()
        for i in range(nPoints):
            c.getPoint(i).setAnnotationFontColor(cssColor)
        self.tickLabelFontColor = cssColor

    def setTickLabelFontStyle(self, cssStyle):
        """Specify the CSS font-style (normal, italic, oblique, or
        inherit) of this axis' tick labels.

        @see #getTickLabelFontStyle, the other setTickLabelFont*
        setters, #DEFAULT_TICK_LABEL_FONT_STYLE
        """
        self.chartDecorationsChanged = True
        c = self.getSystemCurve(self.ticksId)
        nPoints = c.getNPoints()
        for i in range(nPoints):
            c.getPoint(i).setAnnotationFontStyle(cssStyle)
        self.tickLabelFontStyle = cssStyle

    """*
    ** Sets the CSS font size for tick labels on this
    ** axis, in pixels.
    **
    ** @param tickLabelFontSize the font size of tick labels
    ** displayed on this axis.
** ** @see #getTickLabelFontSize getTickLabelFontSize ** @see #setTickLabelFormat setTickLabelFormat ** @see #setTickCount setTickCount ** @see #addTick(double) addTick(double) ** @see #addTick(double,String) addTick(double,String) ** @see #addTick(double,String,int,int) addTick(double,String,int,int) ** @see #addTick(double,Widget) addTick(double,Widget) ** @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) ** @see #setTickLabelFontStyle setTickLabelFontStyle ** @see #setTickLabelFontColor setTickLabelFontColor ** @see #setTickLabelFontWeight setTickLabelFontWeight ** @see GChart#DEFAULT_TICK_LABEL_FONTSIZE DEFAULT_TICK_LABEL_FONTSIZE ** *""" def setTickLabelFontSize(self, tickLabelFontSize): self.chartDecorationsChanged = True c = self.getSystemCurve(self.ticksId) nPoints = c.getNPoints() for i in range(nPoints): c.getPoint(i).setAnnotationFontSize(tickLabelFontSize) self.tickLabelFontSize = tickLabelFontSize """* * Specifies a format string to be used in * converting the numeric values associated with each * tick on this axis into tick labels. This string must * follow the conventions of the number format patterns * used by the GWT <a * href="http:#google-web-toolkit.googlecode.com/svn/javadoc/1.4/com/google/gwt/i18n/client/NumberFormat.html"> * NumberFormat</a> class, <i>with three * exceptions:</i> * <p> * <ol> * * <li><i>Log10 inverse prefix</i>: If the string begins * with the prefix <tt>=10^</tt> the value is replaced with * <tt>pow(10.,value)</tt> and the so-transformed value is * then formatted using the part of the format string that * comes after this prefix, which must be a valid GWT * <tt>NumberFormat</tt> pattern (e.g. "##.##"). * <p> * For an example of how to use this prefix to create a * semi-log plot, see <a * href="package-summary.html#GChartExample04">the * Chart Gallery's GChartExample04</a>. 
* <p> * * <li><i>Log2 inverse prefix</i>: If the string begins with * the prefix <tt>=2^</tt> the value is replaced with * <tt>pow(2.,value)</tt> and the so-transformed value is * then formatted using the part of the format string that * comes after this prefix, which must be a valid GWT * <tt>NumberFormat</tt> pattern. * <p> * * <li><i>Date casting prefix</i>: If the string begins with * the prefix <tt>=(Date)</tt> the value is replaced with * <tt>Date((long) value)</tt> and the so-transformed * value is then formatted using the format string that * comes after this prefix, which must be a valid GWT * <a href="http:#google-web-toolkit.googlecode.com/svn/javadoc/1.4/com/google/gwt/i18n/client/DateTimeFormat.html"> * DateTimeFormat</a> pattern (e.g. "yyyy-MM-dd&nbsp;HH:mm"). * For the special case format string of <tt>"=(Date)"</tt> * (just the date casting prefix) GChart uses the * <tt>DateTimeFormat</tt> returned by the * <tt>DateTimeFormat.getShortDateTimeFormat</tt> method. <p> * * Note that the values associated with this Axis must * represent the number of milliseconds since January 1, * 1970 (in the GMT time zone) whenever this date * casting prefix is used. <p> * * * For example, if the x-axis tick label format were * "=(Date)MMM-dd-yyyy HH", then, for a tick located at the * x position of 0, the tick label would be "Jan-01-1970 00" * (on a client in the GMT time zone) and for a tick located * at the x position of 25*60*60*1000 (one day + one hour, * in milliseconds) the tick label would be "Jan-02-1970 01" * (again, on a GMT-based client). Results would be * shifted appropriately on clients in different time zones. * <p> * * Note that if your chart is based on absolute, GMT-based, * millisecond times then date labels will change when your * chart is displayed on clients in different time zones. * Sometimes, this is what you want. 
To keep the date labels * the same in all time zones, convert date labels into Java * <tt>Date</tt> objects in your client-side code, then use * the <tt>Date.getTime</tt> method, also in your * client-side code, to convert those dates into the * millisecond values GChart requires. The <a * href="package-summary.html#GChartExample12"> Chart * Gallery's GChartExample12</a> illustrates how to use this * second approach to produce a time series chart whose * date-time labels are the same in all time zones. * * <p> * * <blockquote><small> * * Ben Martin describes an alter(and more flexible) * approach to formatting time series tick labels in his <a * href="http:#www.linux.com/feature/132854">GChart * tutorial</a>. Ben's article, along with Malcolm Gorman's * related <a * href="http:#groups.google.com/group/Google-Web-Toolkit/msg/6125ce39fd2339ac"> * GWT forum post</a> were the origin of this date * casting prefix. Thanks! </small></blockquote> * * </ol> * <p> * * * <p> Though HTML text is not supported in the tick label * format string, you can change the size, weight, style, and * color of tick label text via the * <tt>setTickLabelFont*</tt> family of methods. You * <i>can</i> use HTML in tick labels (e.g. for a multi-line * x-axis label) but but only if you define each tick label * explicitly using the <tt>addTick</tt> method. * * @param format an appropriately prefixed * GWT <tt>NumberFormat</tt> compatible or * GWT <tt>DateTimeFormat</tt> compatible format string that * defines how to convert tick values into tick labels. 
* * @see #setTickCount setTickCount * @see #addTick(double) addTick(double) * @see #addTick(double,String) addTick(double,String) * @see #addTick(double,String,int,int) addTick(double,String,int,int) * @see #addTick(double,Widget) addTick(double,Widget) * @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) * @see #setTickLabelFontSize setTickLabelFontSize * @see #setTickLabelFontStyle setTickLabelFontStyle * @see #setTickLabelFontColor setTickLabelFontColor * @see #setTickLabelFontWeight setTickLabelFontWeight * @see #getTickLabelFormat getTickLabelFormat """ def setTickLabelFormat(self, format): # interpret prefixes and create an appropriate formatter if self.tickLabelFormat == format: return self.chartDecorationsChanged = True if format.startswith("=(Date)"): transFormat = format[len("=(Date)"):] if transFormat.equals(""): # so "=(Date)" works self.dateFormat = DateTimeFormat.getShortDateTimeFormat() else: # e.g. "=(Date)mm/dd/yy hh:mm" self.dateFormat = DateTimeFormat.getFormat(transFormat) self.tickLabelFormatType = DATE_FORMAT_TYPE elif format.startswith("=10^"): transFormat = format[len("=10^"):] self.numberFormat = NumberFormat.getFormat(transFormat) self.tickLabelFormatType = LOG10INVERSE_FORMAT_TYPE elif format.startswith("=2^"): transFormat = format[len("=2^"):] self.numberFormat = NumberFormat.getFormat(transFormat) self.tickLabelFormatType = LOG2INVERSE_FORMAT_TYPE else: self.numberFormat = NumberFormat.getFormat(format) self.tickLabelFormatType = NUMBER_FORMAT_TYPE # remember original format (for use with the getter) self.tickLabelFormat = format """* Specifies the number of pixels of padding (blank space) ** between the tick marks and their labels. <p> ** ** With the default of <tt>0</tt>, each ** tick label is flush against its tick mark. ** ** @param tickLabelPadding the amount of padding between ** tick labels and tick marks, in pixels. 
** ** ** @see #getTickLabelPadding getTickLabelPadding ** @see #setTickLength setTickLength ** @see #setTickLocation setTickLocation ** *""" def setTickLabelPadding(self, tickLabelPadding): self.chartDecorationsChanged = True self.tickLabelPadding = tickLabelPadding """* Specifies the thickness of the region adjacent to ** this axis that GChart will reserve for purposes of ** holding this axis' tick labels. <p> ** <p> ** ** For vertical axes, this represents the width of the ** widest tick label, for horizontal axes, this represents ** the height of highest tick label. ** <p> ** ** ** By default, this property has the special "undefined" ** value <tt>NAI</tt>. With this value, the ** companion method <tt>getTickLabelThickness</tt> uses an ** HTML-based heuristic to estimate the thickness. ** ** ** @see #getTickLabelThickness getTickLabelThickness ** @see #setTickLabelFontSize setTickLabelFontSize ** @see #setTickLocation setTickLocation ** @see #setTickLabelPadding setTickLabelPadding ** @see #setAxisLabel setAxisLabel ** @see NAI NAI ** *""" def setTickLabelThickness(self, tickLabelThickness): self.chartDecorationsChanged = True self.tickLabelThickness = tickLabelThickness """* Specifies the ratio of the number of tick marks on the ** axis, to the number of gridlines on the axis. ** <p> ** ** For example, with the default value of 1, every tick has ** an associated gridline, whereas with a ** <tt>ticksPerGridline</tt> setting of 2, only the first, ** third, fifth, etc. ticks have gridlines. ** ** <p> ** ** This setting only has an impact when the axis' gridlines ** are turned on, that is, when this axis' ** <tt>getHasGridlines</tt> method returns True. 
** ** @see #setHasGridlines setHasGridlines ** @see #setTickCount setTickCount ** @see #addTick(double) addTick(double) ** @see #addTick(double,String) addTick(double,String) ** @see #addTick(double,String,int,int) addTick(double,String,int,int) ** @see #addTick(double,Widget) addTick(double,Widget) ** @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) ** ** @param ticksPerGridline the number of ticks on this ** axis per "gridline-extended" tick. ** *""" def setTicksPerGridline(self, ticksPerGridline): if ticksPerGridline <= 0: raise IllegalArgumentException("ticksPerGridline=" + ticksPerGridline + "; ticksPerGridline must be > 0") self.chartDecorationsChanged = True self.ticksPerGridline = ticksPerGridline """* Specifies the ratio of the number of tick marks on the ** axis, to the number of labeled tick marks on the axis. ** <p> ** ** For example, with the default value of 1, every tick is ** labeled, whereas with a <tt>ticksPerLabel</tt> setting ** of 2, only the first, third, fifth, etc. ticks are ** labeled. ** ** <p> ** ** This setting is only used when tick labels ** are specified implicitly via <tt>setTickCount</tt>. It ** is ignored when tick positions and their labels are ** explicitly specified via <tt>addTick</tt>. ** ** @see #setTickCount setTickCount ** @see #addTick(double) addTick(double) ** @see #addTick(double,String) addTick(double,String) ** @see #addTick(double,String,int,int) addTick(double,String,int,int) ** @see #addTick(double,Widget) addTick(double,Widget) ** @see #addTick(double,Widget,int,int) addTick(double,Widget,int,int) ** ** @param ticksPerLabel the ratio of the number of ticks, ** to the number of labeled ticks. ** *""" def setTicksPerLabel(self, ticksPerLabel): self.chartDecorationsChanged = True if ticksPerLabel <= 0: raise IllegalArgumentException("ticksPerLabel=" + ticksPerLabel + "; ticksPerLabel must be > 0") self.ticksPerLabel = ticksPerLabel """* * Sets this axis' tick length. 
    Set the tick length to zero to
    * eliminate the tick entirely.
    * <p>
    *
    *
    * @param tickLength the length of the tick.
    *
    * @see #getTickLength getTickLength
    * @see #setTickThickness setTickThickness
    * @see #setTickLabelPadding setTickLabelPadding
    * @see #setTickLocation setTickLocation
    *
    """
    def setTickLength(self, tickLength):
        # Abstract: overridden by subclasses (see XAxis.setTickLength,
        # which maps tick length onto the tick symbol's height).
        pass

    def setTickLocation(self, tickLocation):
        """Specify the location of tick marks relative to this axis:
        TickLocation.INSIDE, TickLocation.OUTSIDE, or
        TickLocation.CENTERED.

        @see #getTickLocation, #setTickThickness, #setTickLength,
        @see #setTickLabelPadding
        """
        self.tickLocation = tickLocation
        self.chartDecorationsChanged = True
        # ticks are rendered via a system curve; its symbol type and
        # size encode the location and length of the marks
        sym = self.getSystemCurve(self.ticksId).getSymbol()
        if self.isHorizontalAxis:
            sym.setSymbolType(tickLocation.getXAxisSymbolType(self.axisPosition))
            sym.setHeight(self.getActualTickLength())
        else:
            sym.setSymbolType(tickLocation.getYAxisSymbolType(self.axisPosition))
            sym.setWidth(self.getActualTickLength())

    def setTickThickness(self, tickThickness):
        """Set this axis' tick thickness, in pixels.

        @see #getTickThickness, #setTickLength, #setTickLabelPadding,
        @see #setTickLocation
        """
        # Abstract: overridden by subclasses (see XAxis.setTickThickness).
        pass

    def maybePopulateTicks(self):
        # ticks are auto-generated only when an explicit tick count has
        # been set; NAI means ticks were specified via addTick
        if self.tickCount != NAI:
            self.populateTicks()

    # fills in the tick positions when auto-generated.
    def populateTicks(self):
        self.getSystemCurve(self.ticksId).clearPoints()
        #TODO: It should be possible to control the visibility of each axis,
        # including ticks and tick labels, independent of the specifications of
        # the tick marks on that axis, and independent of if any curves are
        # mapped to that axis or not.  A setVisible(Boolean isVisible) as a
        # three-way, with None being the current, dependent, defaults, and
        # TRUE, FALSE explicitly making the entire axis, including tick marks
        # and labels visible or not without having to zero the tick count, add
        # dummy curve to the axis, etc. to control axis visibility is needed.
        # x, y ticks are drawn even
        # if no curves are on these axes
        if XTICKS_ID == self.ticksId or YTICKS_ID == self.ticksId or 0 < self.getNCurvesVisibleOnAxis():
            l = self.getAxisLimits()
            for i in range(self.tickCount):
                # linear interpolation between min and max
                if (self.tickCount == 1):
                    position = l.max
                else:
                    position = ((l.min * ((self.tickCount-1)-i) + i * l.max)/
                                (self.tickCount-1.0))
                # only every ticksPerLabel-th tick gets a label
                self.addTickAsPoint(position,
                                    (0 == i % self.ticksPerLabel) and
                                    self.formatAsTickLabel(position) or None,
                                    None, NAI, NAI)

    # fills in the gridlines; ticks are assumed already populated
    def populateGridlines(self):
        cTicks = self.getSystemCurve(self.ticksId)
        cGridlines = self.getSystemCurve(self.gridlinesId)
        cGridlines.clearPoints()
        nTicks = cTicks.getNPoints()
        for iTick in range(nTicks):
            # only every ticksPerGridline-th tick is extended
            if self.hasGridlines and (iTick % self.ticksPerGridline) == 0:
                p = cTicks.getPoint(iTick)
                cGridlines.addPoint(p.getX(), p.getY())

    # Computes the effective axis limits into ``result`` (an object
    # with .min/.max), substituting defaults for unspecified (NaN) or
    # degenerate (min == max) limits.
    def _getAxisLimits(self, result):
        # so we get 1-unit changes between adjacent ticks
        DEFAULT_AXIS_RANGE = DEFAULT_TICK_COUNT-1
        min = self.getAxisMin()
        max = self.getAxisMax()
        # Adjust min/max so that special cases, like one-point
        # charts, do not have axes that shrink down to a point,
        # which would create numerical and visual difficulties.
        if (Double.NaN==(min)) and (Double.NaN==(max)):
            # e.g. no data and no explicitly specified ticks
            min = 0
            max = min + DEFAULT_AXIS_RANGE
        elif (Double.NaN==(min)) and not (Double.NaN==(max)):
            # e.g. no data but only max explicitly set
            min = max - DEFAULT_AXIS_RANGE
        elif not (Double.NaN==(min)) and (Double.NaN==(max)):
            # e.g. no data but only min explicitly set
            max = min + DEFAULT_AXIS_RANGE
        elif min == max:
            # e.g one data point only, or they set min=max
            max = min + DEFAULT_AXIS_RANGE
        result.min = min
        result.max = max

    def getAxisLimits(self):
        # refreshes and returns the cached current limits
        self._getAxisLimits(self.currentLimits)
        return self.currentLimits

    def rememberLimits(self):
        # snapshot current limits so limitsChanged() can compare later
        self._getAxisLimits(self.previousLimits)

    def limitsChanged(self):
        return not self.getAxisLimits().equals(self.previousLimits)

    """ similar to getTickText, except for the tick position """
    def getTickPosition(self, c, iTick):
        if self.isHorizontalAxis:
            result = c.getPoint(iTick).getX()
        else:
            result = c.getPoint(iTick).getY()
        return result

    # returns the largest, explicitly specified, tick position
    def getTickMax(self):
        result = -Double.MAX_VALUE
        c = self.getSystemCurve(self.ticksId)
        nTicks = c.getNPoints()
        for i in range(nTicks):
            result = max(result, self.getTickPosition(c, i))
        return result

    # returns the smallest, explicitly specified, tick position
    def getTickMin(self):
        result = Double.MAX_VALUE
        c = self.getSystemCurve(self.ticksId)
        nTicks = c.getNPoints()
        for i in range(nTicks):
            result = min(result, self.getTickPosition(c, i))
        return result

    # Same as max, except treats NaN/MAX_VALUE values as "not there"
    def maxIgnoreNaNAndMaxValue(self, x1, x2):
        if Double.NaN==(x1) or Double.MAX_VALUE == x1 or -Double.MAX_VALUE == x1:
            result = x2
        elif Double.NaN==(x2) or Double.MAX_VALUE == x2 or -Double.MAX_VALUE == x2:
            result = x1
        else:
            result = max(x1, x2)
        return result

    # Same as min, except treats NaN/MAX_VALUE values as "not there"
    def minIgnoreNaNAndMaxValue(self, x1, x2):
        if Double.NaN==(x1) or Double.MAX_VALUE == x1 or -Double.MAX_VALUE == x1:
            result = x2
        elif Double.NaN==(x2) or Double.MAX_VALUE == x2 or -Double.MAX_VALUE == x2:
            result = x1
        else:
            result = min(x1, x2)
        return result

    # does a dummy set of any dynamically determined axis
    # limit, so, for update purposes, they are considered
    # to have changed.
def invalidateDynamicAxisLimits(self): if (Double.NaN==(axisMin)): self.setAxisMin(axisMin) if (Double.NaN==(axisMax)): self.setAxisMax(axisMax) """* The x-axis of a GChart. * * @see GChart#getXAxis getXAxis """ class XAxis(Axis): def __init__(self, chart): Axis.__init__(self, chart) self.isHorizontalAxis = True self.ticksId = XTICKS_ID self.gridlinesId = XGRIDLINES_ID self.axisId = XAXIS_ID self.axisPosition = -1 self.tickLabelFormatType = None self.setTickLocation(TickLocation.DEFAULT_TICK_LOCATION) self.setTickThickness(DEFAULT_TICK_THICKNESS) self.setTickLength(DEFAULT_TICK_LENGTH) def clientToModel(clientCoordinate): xPixel = (Window.getScrollLeft() + clientCoordinate - self.chart.plotPanel.getAbsoluteLeft()) result = self.chart.plotPanel.xChartPixelToX(xPixel) return result def getAxisLabelThickness(self): EXTRA_CHARHEIGHT = 2; # 1-char space above & below DEF_CHARHEIGHT = 1 result = 0 if None == self.getAxisLabel(): result = 0 elif NAI != self.axisLabelThickness: result = self.axisLabelThickness elif hasattr(self.getAxisLabel(), "getHTML"): charHeight = htmlHeight( self.getAxisLabel().getHTML()) result = int (round((EXTRA_CHARHEIGHT+charHeight) * self.getTickLabelFontSize() * TICK_CHARHEIGHT_TO_FONTSIZE_LOWERBOUND)) else: result = int (round( (EXTRA_CHARHEIGHT + DEF_CHARHEIGHT) * self.getTickLabelFontSize() * TICK_CHARWIDTH_TO_FONTSIZE_LOWERBOUND)) return result def getDataMax(self): result = -Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue nPoints = c.getNPoints() for j in range(nPoints): result = self.maxIgnoreNaNAndMaxValue(result, c.getPoint(j).getX()) if result == -Double.MAX_VALUE: return Double.NaN return result def getDataMin(self): result = Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue nPoints = c.getNPoints() for j in range(nPoints): result = 
self.minIgnoreNaNAndMaxValue(result, c.getPoint(j).getX()) if result == Double.MAX_VALUE: return Double.NaN return result def getMouseCoordinate(self): result = self.chart.plotPanel.xChartPixelToX(self.chart.plotPanel.getXMouse()) return result def getTickLabelThickness(self, needsPopulation=True): # overrides base class if self.tickLabelThickness != NAI: result = self.tickLabelThickness elif self.getTickCount() == 0: result = 0 else: # XXX: single line labels assumed; these have height # almost equal to the fontSize in pixels. Not really # right, since multi-line HTML can now be used, but user # can explicitly change tick label thickness with # multi-line, HTML based, ticks, so OK for now. result = int (round( TICK_CHARHEIGHT_TO_FONTSIZE_LOWERBOUND * self.tickLabelFontSize)) return result def modelToClient(self, modelCoordinate): xPixel = self.chart.plotPanel.xToChartPixel(modelCoordinate) result = (self.chart.plotPanel.getAbsoluteLeft() - Window.getScrollLeft() + xPixel ) return result def modelToPixel(self, modelCoordinate): result = self.chart.plotPanel.xToChartPixel(modelCoordinate) return result def modelToPlotAreaPixel(self, modelCoordinate): result = self.chart.plotPanel.xToPixel(modelCoordinate) return result def pixelToModel(self, pixelCoordinate): result = self.chart.plotPanel.xChartPixelToX(pixelCoordinate) return result def plotAreaPixelToModel(self, pixelCoordinate): result = self.chart.plotPanel.xPixelToX(pixelCoordinate) return result def setTickLength(self, tickLength): self.chartDecorationsChanged = True self.tickLength = tickLength self.getSystemCurve(self.ticksId).getSymbol().setHeight( self.getActualTickLength()) def setTickThickness(self, tickThickness): self.tickThickness = tickThickness self.getSystemCurve(self.ticksId).getSymbol().setWidth(tickThickness) # INDENT ERROR} end of class XAxis """* The right, or "y2", axis of a GChart. 
* * @see GChart#getY2Axis getY2Axis """ class Y2Axis(Axis): def __init__(self, chart): Axis.__init__(self, chart) self.isHorizontalAxis = False self.ticksId = Y2TICKS_ID self.gridlinesId = Y2GRIDLINES_ID self.axisId = Y2AXIS_ID self.axisPosition = 1 self.setTickLocation(TickLocation.DEFAULT_TICK_LOCATION) self.setTickThickness(DEFAULT_TICK_THICKNESS) self.setTickLength(DEFAULT_TICK_LENGTH) def clientToModel(clientCoordinate): yPixel = (Window.getScrollTop() + clientCoordinate - self.chart.plotPanel.getAbsoluteTop()) result = self.chart.plotPanel.yChartPixelToY2(yPixel) return result def getDataMax(self): result = -Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue if c.getYAxis() == Y2_AXIS: nPoints = c.getNPoints() for j in range(nPoints): result = self.maxIgnoreNaNAndMaxValue(result, c.getPoint(j).getY()) if result == -Double.MAX_VALUE: return Double.NaN return result def getDataMin(self): result = Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue if c.getYAxis() == Y2_AXIS: nPoints = c.getNPoints() for j in range(nPoints): result = self.minIgnoreNaNAndMaxValue(result, c.getPoint(j).getY()) if result == Double.MAX_VALUE: return Double.NaN return result def getMouseCoordinate(self): result = self.chart.plotPanel.yChartPixelToY2(self.chart.plotPanel.getYMouse()) return result def modelToClient(self, modelCoordinate): yPixel = self.chart.plotPanel.yToChartPixel(modelCoordinate, True) result = self.chart.plotPanel.getAbsoluteTop() - Window.getScrollTop() + yPixel return result def modelToPixel(self, modelCoordinate): result = self.chart.plotPanel.yToChartPixel(modelCoordinate, True) return result def modelToPlotAreaPixel(self, modelCoordinate): result = self.chart.plotPanel.yToPixel(modelCoordinate, True) return result def pixelToModel(self, pixelCoordinate): result = 
self.chart.plotPanel.yChartPixelToY2(pixelCoordinate) return result def plotAreaPixelToModel(self, pixelCoordinate): result = self.chart.plotPanel.yPixelToY2(pixelCoordinate) return result def setTickLength(self, tickLength): self.chartDecorationsChanged = True self.tickLength = tickLength self.getSystemCurve(self.ticksId).getSymbol().setWidth(self.getActualTickLength()) def setTickThickness(self, tickThickness): self.tickThickness = tickThickness self.getSystemCurve(self.ticksId).getSymbol().setHeight(tickThickness) """* The left y-axis of a GChart. * * @see GChart#getYAxis getYAxis * """ class YAxis(Axis): def __init__(self, chart): Axis.__init__(self, chart) self.isHorizontalAxis = False self.ticksId = YTICKS_ID self.gridlinesId = YGRIDLINES_ID self.axisId = YAXIS_ID self.axisPosition = -1 self.setTickLocation(TickLocation.DEFAULT_TICK_LOCATION) self.setTickThickness(DEFAULT_TICK_THICKNESS) self.setTickLength(DEFAULT_TICK_LENGTH) def clientToModel(clientCoordinate): yPixel = (Window.getScrollTop() + clientCoordinate - self.chart.plotPanel.getAbsoluteTop()) result = self.chart.plotPanel.yChartPixelToY(yPixel) return result def getDataMax(self): result = -Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue if c.getYAxis() == Y_AXIS: nPoints = c.getNPoints() for j in range(nPoints): result = self.maxIgnoreNaNAndMaxValue(result, c.getPoint(j).getY()) if result == -Double.MAX_VALUE: return Double.NaN return result def getDataMin(self): result = Double.MAX_VALUE nCurves = self.chart.getNCurves() for i in range(nCurves): c = self.getSystemCurve(i) if not c.isVisible(): continue if c.getYAxis() == Y_AXIS: nPoints = c.getNPoints() for j in range(nPoints): result = self.minIgnoreNaNAndMaxValue(result, c.getPoint(j).getY()) if result == Double.MAX_VALUE: return Double.NaN return result def getMouseCoordinate(self): result = self.chart.plotPanel.yChartPixelToY(self.chart.plotPanel.getYMouse()) 
return result def modelToClient(self, modelCoordinate): yPixel = self.chart.plotPanel.yToChartPixel(modelCoordinate, False) result = self.chart.plotPanel.getAbsoluteTop() - Window.getScrollTop() + yPixel return result def modelToPixel(self, modelCoordinate): result = self.chart.plotPanel.yToChartPixel(modelCoordinate, False) return result def modelToPlotAreaPixel(self, modelCoordinate): result = self.chart.plotPanel.yToPixel(modelCoordinate, False) return result def pixelToModel(self, pixelCoordinate): result = self.chart.plotPanel.yChartPixelToY(pixelCoordinate) return result def plotAreaPixelToModel(self, pixelCoordinate): result = self.chart.plotPanel.yPixelToY(pixelCoordinate) return result def setTickLength(self, tickLength): self.chartDecorationsChanged = True self.tickLength = tickLength self.getSystemCurve(self.ticksId).getSymbol().setWidth( self.getActualTickLength()) def setTickThickness(self, tickThickness): self.tickThickness = tickThickness self.getSystemCurve(self.ticksId).getSymbol().setHeight(tickThickness)
apache-2.0
brendancsmith/cohort-facebook
lib/word_cloud-master/doc/conf.py
17
9715
# -*- coding: utf-8 -*- # # wordcloud documentation build configuration file, created by # sphinx-quickstart on Fri May 3 17:14:50 2013. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import sphinx_bootstrap_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('sphinxext')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['gen_rst', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'numpy_ext.numpydoc'] autosummary_generate = True autodoc_default_flags = ['members', 'inherited-members'] # generate autosummary even if no references autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'wordcloud' copyright = u'2013, Andreas Mueller' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', '_templates', '_themes'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'bootstrap' # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['_themes'] html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() #The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
#html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'wordclouddoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). #latex_documents = [('index', 'wordcloud.tex', u'wordcloud Documentation', #u'Andreas Mueller', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'wordcloud', u'wordcloud Documentation', [u'Andreas Mueller'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'wordcloud', u'wordcloud Documentation', u'Andreas Mueller', 'wordcloud', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. 
#texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Theme options are theme-specific and customize the look and feel of a # theme further. html_theme_options = { # Navigation bar title. (Default: ``project`` value) 'navbar_title': "Wordcloud", # Tab name for entire site. (Default: "Site") #'navbar_site_name': "Site", # A list of tuples containting pages to link to. The value should # be in the form [(name, page), ..] 'navbar_links': [ ('Examples', 'auto_examples/index'), ('References', 'references'), ], # Global TOC depth for "site" navbar tab. (Default: 1) # Switching to -1 shows all levels. 'globaltoc_depth': 0, # Include hidden TOCs in Site navbar? # # Note: If this is "false", you cannot have mixed ``:hidden:`` and # non-hidden ``toctree`` directives in the same page, or else the build # will break. # # Values: "true" (default) or "false" 'globaltoc_includehidden': "true", # HTML navbar class (Default: "navbar") to attach to <div> element. # For black navbar, do "navbar navbar-inverse" 'navbar_class': "navbar", # Fix navigation bar to top of page? # Values: "true" (default) or "false" 'navbar_fixed_top': "true", # Location of link to source. # Options are "nav" (default), "footer" or anything else to exclude. 'source_link_position': "None", # Bootswatch (http://bootswatch.com/) theme. # # Options are nothing with "" (default) or the name of a valid theme # such as "amelia" or "cosmo". # # Note that this is served off CDN, so won't be available offline. #'bootswatch_theme': "united", }
mit
vismartltd/edx-platform
common/djangoapps/student/migrations/0021_remove_askbot.py
188
11582
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models ASKBOT_AUTH_USER_COLUMNS = ( 'website', 'about', 'gold', 'email_isvalid', 'real_name', 'location', 'reputation', 'gravatar', 'bronze', 'last_seen', 'silver', 'questions_per_page', 'new_response_count', 'seen_response_count', ) class Migration(SchemaMigration): def forwards(self, orm): "Kill the askbot" try: # For MySQL, we're batching the alters together for performance reasons if db.backend_name == 'mysql': drops = ["drop `{0}`".format(col) for col in ASKBOT_AUTH_USER_COLUMNS] statement = "alter table `auth_user` {0};".format(", ".join(drops)) db.execute(statement) else: for column in ASKBOT_AUTH_USER_COLUMNS: db.delete_column('auth_user', column) except Exception as ex: print "Couldn't remove askbot because of {0} -- it was probably never here to begin with.".format(ex) def backwards(self, orm): raise RuntimeError("Cannot reverse this migration: there's no going back to Askbot.") models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'student.courseenrollment': { 'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'}, 'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'created': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'student.pendingemailchange': { 'Meta': {'object_name': 'PendingEmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.pendingnamechange': { 'Meta': {'object_name': 'PendingNameChange'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.registration': { 'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'student.testcenteruser': { 'Meta': {'object_name': 'TestCenterUser'}, 'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}), 
'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'client_candidate_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}), 'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}), 'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}), 'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 
'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}), 'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}) }, 'student.userprofile': { 'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"}, 'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), 'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}), 'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}) }, 'student.usertestgroup': { 'Meta': {'object_name': 'UserTestGroup'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), 'users': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'}) } } complete_apps = ['student']
agpl-3.0
Liam3851/fredapi
fredapi/fred.py
1
19208
import os
import sys
import xml.etree.ElementTree as ET

# Python 2/3 compatibility shims for the urllib family.
if sys.version_info[0] >= 3:
    import urllib.request as url_request
    import urllib.parse as url_parse
    import urllib.error as url_error
else:
    import urllib2 as url_request
    import urllib as url_parse
    import urllib2 as url_error

import pandas as pd

urlopen = url_request.urlopen
quote_plus = url_parse.quote_plus
urlencode = url_parse.urlencode
HTTPError = url_error.HTTPError


class Fred(object):
    """Thin client for the St. Louis Fed FRED HTTP API.

    All data-returning methods issue HTTPS requests against ``root_url``
    and parse the XML responses into pandas objects.
    """

    earliest_realtime_start = '1776-07-04'
    latest_realtime_end = '9999-12-31'
    nan_char = '.'                   # FRED marks missing observations with a period
    max_results_per_request = 1000   # server-side page size for search endpoints
    root_url = 'https://api.stlouisfed.org/fred'

    def __init__(self, api_key=None, api_key_file=None):
        """
        Initialize the Fred class that provides useful functions to query
        the Fred dataset. You need to specify a valid API key in one of 3
        ways: pass the string via api_key, or set api_key_file to a file
        with the api key in the first line, or set the environment variable
        'FRED_API_KEY' to the value of your api key.
        You can sign up for a free api key on the Fred website at
        http://research.stlouisfed.org/fred2/
        """
        self.api_key = None
        if api_key is not None:
            self.api_key = api_key
        elif api_key_file is not None:
            # context manager guarantees the key file is closed even if
            # readline() raises (the original leaked the handle)
            with open(api_key_file, 'r') as f:
                self.api_key = f.readline().strip()
        else:
            self.api_key = os.environ.get('FRED_API_KEY')
        if self.api_key is None:
            import textwrap
            raise ValueError(textwrap.dedent("""\
                    You need to set a valid API key. You can set it in 3 ways:
                    pass the string with api_key, or set api_key_file to a
                    file with the api key in the first line, or set the
                    environment variable 'FRED_API_KEY' to the value of your
                    api key. You can sign up for a free api key on the Fred
                    website at http://research.stlouisfed.org/fred2/"""))

    def __fetch_data(self, url):
        """
        helper function for fetching data given a request URL
        """
        url += '&api_key=' + self.api_key
        try:
            response = urlopen(url)
            root = ET.fromstring(response.read())
        except HTTPError as exc:
            # FRED returns an XML error document whose 'message' attribute
            # explains the failure -- surface it as a ValueError
            root = ET.fromstring(exc.read())
            raise ValueError(root.get('message'))
        return root

    def _parse(self, date_str, format='%Y-%m-%d'):
        """
        helper function for parsing FRED date string into datetime
        """
        rv = pd.to_datetime(date_str, format=format)
        if hasattr(rv, 'to_pydatetime'):
            rv = rv.to_pydatetime()
        return rv

    def get_series_info(self, series_id):
        """
        Get information about a series such as its title, frequency,
        observation start/end dates, units, notes, etc.

        Parameters
        ----------
        series_id : str
            Fred series id such as 'CPIAUCSL'

        Returns
        -------
        info : Series
            a pandas Series containing information about the Fred series
        """
        url = "%s/series?series_id=%s" % (self.root_url, series_id)
        root = self.__fetch_data(url)
        if root is None or not len(root):
            raise ValueError('No info exists for series id: ' + series_id)
        # root[0] replaces root.getchildren()[0]: Element.getchildren() was
        # removed in Python 3.9
        info = pd.Series(root[0].attrib)
        return info

    def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):
        """
        Get data for a Fred series id. This fetches the latest known data,
        and is equivalent to get_series_latest_release()

        Parameters
        ----------
        series_id : str
            Fred series id such as 'CPIAUCSL'
        observation_start : datetime or datetime-like str such as '7/1/2014', optional
            earliest observation date
        observation_end : datetime or datetime-like str such as '7/1/2014', optional
            latest observation date
        kwargs : additional parameters
            Any additional parameters supported by FRED. You can see
            https://api.stlouisfed.org/docs/fred/series_observations.html
            for the full list

        Returns
        -------
        data : Series
            a Series where each index is the observation date and the value
            is the data for the Fred series
        """
        url = "%s/series/observations?series_id=%s" % (self.root_url, series_id)
        if observation_start is not None:
            observation_start = pd.to_datetime(observation_start, errors='raise')
            url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')
        if observation_end is not None:
            observation_end = pd.to_datetime(observation_end, errors='raise')
            url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')
        if kwargs:  # truthiness check replaces `if kwargs.keys():`
            url += '&' + urlencode(kwargs)
        root = self.__fetch_data(url)
        if root is None:
            raise ValueError('No data exists for series id: ' + series_id)
        data = {}
        # iterating the Element directly replaces the removed getchildren()
        for child in root:
            val = child.get('value')
            if val == self.nan_char:
                val = float('NaN')
            else:
                val = float(val)
            data[self._parse(child.get('date'))] = val
        return pd.Series(data)

    def get_series_latest_release(self, series_id):
        """
        Get data for a Fred series id. This fetches the latest known data,
        and is equivalent to get_series()

        Parameters
        ----------
        series_id : str
            Fred series id such as 'CPIAUCSL'

        Returns
        -------
        info : Series
            a Series where each index is the observation date and the value
            is the data for the Fred series
        """
        return self.get_series(series_id)

    def get_series_first_release(self, series_id):
        """
        Get first-release data for a Fred series id. This ignores any
        revision to the data series. For instance, The US GDP for Q1 2014
        was first released to be 17149.6, and then later revised to
        17101.3, and 17016.0. This will ignore revisions after the first
        release.

        Parameters
        ----------
        series_id : str
            Fred series id such as 'GDP'

        Returns
        -------
        data : Series
            a Series where each index is the observation date and the value
            is the data for the Fred series
        """
        df = self.get_series_all_releases(series_id)
        # the first row per observation date is the first release
        first_release = df.groupby('date').head(1)
        data = first_release.set_index('date')['value']
        return data

    def get_series_as_of_date(self, series_id, as_of_date):
        """
        Get latest data for a Fred series id as known on a particular date.
        This includes any revision to the data series before or on
        as_of_date, but ignores any revision on dates after as_of_date.

        Parameters
        ----------
        series_id : str
            Fred series id such as 'GDP'
        as_of_date : datetime, or datetime-like str such as '10/25/2014'
            Include data revisions on or before this date, and ignore
            revisions afterwards

        Returns
        -------
        data : Series
            a Series where each index is the observation date and the value
            is the data for the Fred series
        """
        as_of_date = pd.to_datetime(as_of_date)
        df = self.get_series_all_releases(series_id)
        data = df[df['realtime_start'] <= as_of_date]
        return data

    def get_series_all_releases(self, series_id):
        """
        Get all data for a Fred series id including first releases and all
        revisions. This returns a DataFrame with three columns: 'date',
        'realtime_start', and 'value'. For instance, the US GDP for Q4 2013
        was first released to be 17102.5 on 2014-01-30, and then revised to
        17080.7 on 2014-02-28, and then revised to 17089.6 on 2014-03-27.
        You will therefore get three rows with the same 'date' (observation
        date) of 2013-10-01 but three different 'realtime_start' of
        2014-01-30, 2014-02-28, and 2014-03-27 with corresponding 'value'
        of 17102.5, 17080.7 and 17089.6

        Parameters
        ----------
        series_id : str
            Fred series id such as 'GDP'

        Returns
        -------
        data : DataFrame
            a DataFrame with columns 'date', 'realtime_start' and 'value'
            where 'date' is the observation period and 'realtime_start' is
            when the corresponding value (either first release or revision)
            is reported.
        """
        url = "%s/series/observations?series_id=%s&realtime_start=%s&realtime_end=%s" % (
            self.root_url, series_id, self.earliest_realtime_start, self.latest_realtime_end)
        root = self.__fetch_data(url)
        if root is None:
            raise ValueError('No data exists for series id: ' + series_id)
        data = {}
        i = 0
        for child in root:
            val = child.get('value')
            if val == self.nan_char:
                val = float('NaN')
            else:
                val = float(val)
            realtime_start = self._parse(child.get('realtime_start'))
            # realtime_end = self._parse(child.get('realtime_end'))
            date = self._parse(child.get('date'))
            data[i] = {'realtime_start': realtime_start,
                       # 'realtime_end': realtime_end,
                       'date': date,
                       'value': val}
            i += 1
        data = pd.DataFrame(data).T
        return data

    def get_series_vintage_dates(self, series_id):
        """
        Get a list of vintage dates for a series. Vintage dates are the
        dates in history when a series' data values were revised or new
        data values were released.

        Parameters
        ----------
        series_id : str
            Fred series id such as 'CPIAUCSL'

        Returns
        -------
        dates : list
            list of vintage dates
        """
        url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
        root = self.__fetch_data(url)
        if root is None:
            raise ValueError('No vintage date exists for series id: ' + series_id)
        dates = []
        for child in root:
            dates.append(self._parse(child.text))
        return dates

    def __do_series_search(self, url):
        """
        helper function for making one HTTP request for data, and parsing
        the returned results into a DataFrame
        """
        root = self.__fetch_data(url)

        series_ids = []
        data = {}

        num_results_returned = 0  # number of results returned in this HTTP request
        # total number of results, this can be larger than number of results returned
        num_results_total = int(root.get('count'))
        for child in root:
            num_results_returned += 1
            series_id = child.get('id')
            series_ids.append(series_id)
            data[series_id] = {"id": series_id}
            fields = ["realtime_start", "realtime_end", "title", "observation_start",
                      "observation_end", "frequency", "frequency_short", "units",
                      "units_short", "seasonal_adjustment", "seasonal_adjustment_short",
                      "last_updated", "popularity", "notes"]
            for field in fields:
                data[series_id][field] = child.get(field)

        if num_results_returned > 0:
            data = pd.DataFrame(data, columns=series_ids).T
            # parse datetime columns
            for field in ["realtime_start", "realtime_end", "observation_start",
                          "observation_end", "last_updated"]:
                data[field] = data[field].apply(self._parse, format=None)
            # set index name
            data.index.name = 'series id'
        else:
            data = None
        return data, num_results_total

    def __get_search_results(self, url, limit, order_by, sort_order, filter):
        """
        helper function for getting search results up to specified limit on
        the number of results. The Fred HTTP API truncates to 1000 results
        per request, so this may issue multiple HTTP requests to obtain more
        available data.
        """
        order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
                            'seasonal_adjustment', 'realtime_start', 'realtime_end',
                            'last_updated', 'observation_start', 'observation_end',
                            'popularity']
        if order_by is not None:
            if order_by in order_by_options:
                url = url + '&order_by=' + order_by
            else:
                raise ValueError('%s is not in the valid list of order_by options: %s'
                                 % (order_by, str(order_by_options)))

        if filter is not None:
            if len(filter) == 2:
                url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
            else:
                raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')

        sort_order_options = ['asc', 'desc']
        if sort_order is not None:
            if sort_order in sort_order_options:
                url = url + '&sort_order=' + sort_order
            else:
                raise ValueError('%s is not in the valid list of sort_order options: %s'
                                 % (sort_order, str(sort_order_options)))

        data, num_results_total = self.__do_series_search(url)
        if data is None:
            return data

        if limit == 0:
            max_results_needed = num_results_total
        else:
            max_results_needed = limit

        if max_results_needed > self.max_results_per_request:
            for i in range(1, max_results_needed // self.max_results_per_request + 1):
                offset = i * self.max_results_per_request
                next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
                # guard against an empty trailing page (next_data is None)
                if next_data is not None:
                    # DataFrame.append was removed in pandas 2.0; concat is
                    # the supported equivalent
                    data = pd.concat([data, next_data])
        return data.head(max_results_needed)

    def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
        """
        Do a fulltext search for series in the Fred dataset. Returns
        information about matching series in a DataFrame.

        Parameters
        ----------
        text : str
            text to do fulltext search on, e.g., 'Real GDP'
        limit : int, optional
            limit the number of results to this value. If limit is 0, it
            means fetching all results without limit.
        order_by : str, optional
            order the results by a criterion. Valid options are
            'search_rank', 'series_id', 'title', 'units', 'frequency',
            'seasonal_adjustment', 'realtime_start', 'realtime_end',
            'last_updated', 'observation_start', 'observation_end',
            'popularity'
        sort_order : str, optional
            sort the results by ascending or descending order. Valid
            options are 'asc' or 'desc'
        filter : tuple, optional
            filters the results. Expects a tuple like
            (filter_variable, filter_value). Valid filter_variable values
            are 'frequency', 'units', and 'seasonal_adjustment'

        Returns
        -------
        info : DataFrame
            a DataFrame containing information about the matching Fred series
        """
        url = "%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text))
        info = self.__get_search_results(url, limit, order_by, sort_order, filter)
        return info

    def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
        """
        Search for series that belongs to a release id. Returns information
        about matching series in a DataFrame.

        Parameters
        ----------
        release_id : int
            release id, e.g., 151
        limit : int, optional
            limit the number of results to this value. If limit is 0, it
            means fetching all results without limit.
        order_by : str, optional
            order the results by a criterion. Valid options are
            'search_rank', 'series_id', 'title', 'units', 'frequency',
            'seasonal_adjustment', 'realtime_start', 'realtime_end',
            'last_updated', 'observation_start', 'observation_end',
            'popularity'
        sort_order : str, optional
            sort the results by ascending or descending order. Valid
            options are 'asc' or 'desc'
        filter : tuple, optional
            filters the results. Expects a tuple like
            (filter_variable, filter_value). Valid filter_variable values
            are 'frequency', 'units', and 'seasonal_adjustment'

        Returns
        -------
        info : DataFrame
            a DataFrame containing information about the matching Fred series
        """
        url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
        info = self.__get_search_results(url, limit, order_by, sort_order, filter)
        if info is None:
            raise ValueError('No series exists for release id: ' + str(release_id))
        return info

    def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
        """
        Search for series that belongs to a category id. Returns
        information about matching series in a DataFrame.

        Parameters
        ----------
        category_id : int
            category id, e.g., 32145
        limit : int, optional
            limit the number of results to this value. If limit is 0, it
            means fetching all results without limit.
        order_by : str, optional
            order the results by a criterion. Valid options are
            'search_rank', 'series_id', 'title', 'units', 'frequency',
            'seasonal_adjustment', 'realtime_start', 'realtime_end',
            'last_updated', 'observation_start', 'observation_end',
            'popularity'
        sort_order : str, optional
            sort the results by ascending or descending order. Valid
            options are 'asc' or 'desc'
        filter : tuple, optional
            filters the results. Expects a tuple like
            (filter_variable, filter_value). Valid filter_variable values
            are 'frequency', 'units', and 'seasonal_adjustment'

        Returns
        -------
        info : DataFrame
            a DataFrame containing information about the matching Fred series
        """
        url = "%s/category/series?category_id=%d&" % (self.root_url, category_id)
        info = self.__get_search_results(url, limit, order_by, sort_order, filter)
        if info is None:
            raise ValueError('No series exists for category id: ' + str(category_id))
        return info
apache-2.0
HBehrens/puncover
tests/test_backtrace_helper.py
1
4593
import unittest
from mock import MagicMock  # NOTE(review): imported but unused in this module
from puncover.backtrace_helper import BacktraceHelper
from puncover import collector


class TestBacktraceHelper(unittest.TestCase):
    """Tests for BacktraceHelper's symbol extraction from backtrace text."""

    class FakeCollector():
        """Minimal stand-in for puncover's Collector.

        Recognizes only the symbol names it was constructed with, and only
        when queried with qualified=False.
        """

        def __init__(self, symbol_names):
            # names this fake collector will resolve to function symbols
            self.symbol_names = symbol_names

        def symbol(self, name, qualified=True):
            # mimic Collector.symbol(): return a symbol dict for known
            # unqualified names, None otherwise
            if not qualified and name in self.symbol_names:
                return {collector.NAME: name, collector.TYPE: collector.TYPE_FUNCTION}
            return None

    def setUp(self):
        pass

    def test_returns_empty_list(self):
        # empty input text yields no symbols
        r = BacktraceHelper(None)
        self.assertEqual([], r.derive_function_symbols(""))

    def test_returns_known_symbols(self):
        # only names known to the collector are extracted from the gdb-style
        # backtrace text; unknown frames (e.g. walk_line) are ignored, and
        # compiler suffixes like .constprop.8 are stripped to the base name
        r = BacktraceHelper(TestBacktraceHelper.FakeCollector([
            "codepoint_get_horizontal_advance",
            "text_walk_lines",
        ]))
        actual = r.derive_function_symbols("""
            fontinfo=0x200010ec <s_system_fonts_info_table+200>) 16
            at ../src/fw/applib/graphics/text_resources.c:347
#4  0x08012220 in codepoint_get_horizontal_advance () 16
#5  0x08012602 in walk_line () 112
#6  0x080128d6 in text_walk_lines.constprop.8 () (inlined)
""")
        self.assertEqual(["codepoint_get_horizontal_advance", "text_walk_lines"],
                         [f[collector.NAME] for f in actual])

    def test_transform_known_symbols(self):
        # transform_known_symbols applies f only to tokens the collector
        # recognizes, leaving everything else untouched
        r = BacktraceHelper(TestBacktraceHelper.FakeCollector([
            "a", "c", "d",
        ]))

        def f(symbol):
            return symbol[collector.NAME] + symbol[collector.NAME]

        actual = r.transform_known_symbols("0 1 a b c d e f 0 2", f)
        self.assertEqual("0 1 aa b cc dd e f 0 2", actual)


class TestBacktraceHelperTreeSizes(unittest.TestCase):
    """Tests for deepest_callee_tree / deepest_caller_tree stack accounting.

    Call graph built in setUp (stack sizes in parentheses):
        a(1) -> b(10), a -> c(100), b -> a (cycle),
        c -> b, c -> d(1000), d -> e(10000), d -> f(no stack)
    """

    def setUp(self):
        self.cc = collector.Collector(None)
        self.a = self.cc.add_symbol("a", "a", type=collector.TYPE_FUNCTION, stack_size=1)
        self.b = self.cc.add_symbol("b", "b", type=collector.TYPE_FUNCTION, stack_size=10)
        self.c = self.cc.add_symbol("c", "c", type=collector.TYPE_FUNCTION, stack_size=100)
        self.d = self.cc.add_symbol("d", "d", type=collector.TYPE_FUNCTION, stack_size=1000)
        self.e = self.cc.add_symbol("e", "e", type=collector.TYPE_FUNCTION, stack_size=10000)
        # f deliberately has no stack_size to exercise the zero-stack path
        self.f = self.cc.add_symbol("f", "f", type=collector.TYPE_FUNCTION)
        self.cc.enhance_call_tree()
        self.cc.add_function_call(self.a, self.b)
        self.cc.add_function_call(self.a, self.c)
        self.cc.add_function_call(self.b, self.a)
        self.cc.add_function_call(self.c, self.b)
        self.cc.add_function_call(self.c, self.d)
        self.cc.add_function_call(self.d, self.e)
        self.cc.add_function_call(self.d, self.f)
        self.h = BacktraceHelper(self.cc)

    def test_leaf_with_stack(self):
        # a leaf's deepest tree is just itself; result is cached on the symbol
        self.assertEqual((10000, [self.e]), self.h.deepest_callee_tree(self.e))
        self.assertIn(collector.DEEPEST_CALLEE_TREE, self.e)

    def test_leaf_without_stack(self):
        # a leaf without stack_size contributes 0
        self.assertEqual((0, [self.f]), self.h.deepest_callee_tree(self.f))
        self.assertIn(collector.DEEPEST_CALLEE_TREE, self.f)

    def test_cached_value(self):
        # a pre-populated cache entry is returned verbatim
        self.f[collector.DEEPEST_CALLEE_TREE] = "cached"
        self.assertEqual("cached", self.h.deepest_callee_tree(self.f))

    def test_non_leaf(self):
        # d's deepest path goes through e (1000 + 10000)
        self.assertEqual((11000, [self.d, self.e]), self.h.deepest_callee_tree(self.d))
        self.assertIn(collector.DEEPEST_CALLEE_TREE, self.f)
        self.assertIn(collector.DEEPEST_CALLEE_TREE, self.e)
        self.assertIn(collector.DEEPEST_CALLEE_TREE, self.d)

    def test_cycle_2(self):
        # remove a->c so only the a<->b cycle remains; the cycle must not
        # be traversed twice
        self.a[collector.CALLEES].remove(self.c)
        expected = (11, [self.a, self.b])
        actual = self.h.deepest_callee_tree(self.a)
        self.assertEqual(expected, actual)
        expected = (10, [self.b])
        actual = self.h.deepest_callee_tree(self.b)
        self.assertEqual(expected, actual)

    def test_cycle_3(self):
        # remove c->d so the deepest path from a is a->c->b (1+100+10)
        self.c[collector.CALLEES].remove(self.d)
        self.assertEqual(111, self.h.deepest_callee_tree(self.a)[0])
        self.assertEqual(10, self.h.deepest_callee_tree(self.b)[0])
        self.assertEqual(110, self.h.deepest_callee_tree(self.c)[0])

    def test_caller(self):
        # with d's callers cleared, the caller chains stop at d
        self.d[collector.CALLERS] = []
        self.assertEqual(1000, self.h.deepest_caller_tree(self.f)[0])
        self.assertEqual(11000, self.h.deepest_caller_tree(self.e)[0])

    def test_caller_cycle(self):
        # full caller chain includes the a<->b cycle exactly once
        self.assertEqual(1111, self.h.deepest_caller_tree(self.f)[0])
        self.assertEqual(11111, self.h.deepest_caller_tree(self.e)[0])
mit
paulrouget/servo
tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/packages/rfc3986/api.py
47
3383
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

rfc3986.api
~~~~~~~~~~~

This defines the simple API to rfc3986. This module defines 3 functions
and provides access to the class ``URIReference``.

"""

from .uri import URIReference
from .parseresult import ParseResult


def uri_reference(uri, encoding='utf-8'):
    """Parse a URI string into a URIReference.

    This is a convenience function. You could achieve the same end by using
    ``URIReference.from_string(uri)``.

    :param str uri: The URI which needs to be parsed into a reference.
    :param str encoding: The encoding of the string provided
    :returns: A parsed URI
    :rtype: :class:`URIReference`
    """
    parsed = URIReference.from_string(uri, encoding)
    return parsed


def is_valid_uri(uri, encoding='utf-8', **kwargs):
    """Determine if the URI given is valid.

    This is a convenience function. You could use either
    ``uri_reference(uri).is_valid()`` or
    ``URIReference.from_string(uri).is_valid()`` to achieve the same result.

    :param str uri: The URI to be validated.
    :param str encoding: The encoding of the string provided
    :param bool require_scheme: Set to ``True`` if you wish to require the
        presence of the scheme component.
    :param bool require_authority: Set to ``True`` if you wish to require
        the presence of the authority component.
    :param bool require_path: Set to ``True`` if you wish to require the
        presence of the path component.
    :param bool require_query: Set to ``True`` if you wish to require the
        presence of the query component.
    :param bool require_fragment: Set to ``True`` if you wish to require
        the presence of the fragment component.
    :returns: ``True`` if the URI is valid, ``False`` otherwise.
    :rtype: bool
    """
    reference = URIReference.from_string(uri, encoding)
    return reference.is_valid(**kwargs)


def normalize_uri(uri, encoding='utf-8'):
    """Normalize the given URI.

    This is a convenience function. You could use either
    ``uri_reference(uri).normalize().unsplit()`` or
    ``URIReference.from_string(uri).normalize().unsplit()`` instead.

    :param str uri: The URI to be normalized.
    :param str encoding: The encoding of the string provided
    :returns: The normalized URI.
    :rtype: str
    """
    return URIReference.from_string(uri, encoding).normalize().unsplit()


def urlparse(uri, encoding='utf-8'):
    """Parse a given URI and return a ParseResult.

    This is a partial replacement of the standard library's urlparse
    function.

    :param str uri: The URI to be parsed.
    :param str encoding: The encoding of the string provided.
    :returns: A parsed URI
    :rtype: :class:`~rfc3986.parseresult.ParseResult`
    """
    result = ParseResult.from_string(uri, encoding, strict=False)
    return result
mpl-2.0
CAFans/android_kernel_lge_msm8974
tools/perf/scripts/python/syscall-counts-by-pid.py
11180
1927
# system call counts, by pid # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts-by-pid.py [comm]\n"; for_comm = None for_pid = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: try: for_pid = int(sys.argv[1]) except: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if (for_comm and common_comm != for_comm) or \ (for_pid and common_pid != for_pid ): return try: syscalls[common_comm][common_pid][id] += 1 except TypeError: syscalls[common_comm][common_pid][id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events by comm/pid:\n\n", print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), comm_keys = syscalls.keys() for comm in comm_keys: pid_keys = syscalls[comm].keys() for pid in pid_keys: print "\n%s [%d]\n" % (comm, pid), id_keys = syscalls[comm][pid].keys() for id, val in sorted(syscalls[comm][pid].iteritems(), \ key = lambda(k, v): (v, k), reverse = True): print " %-38s %10d\n" % (syscall_name(id), val),
gpl-2.0
OpusVL/odoo
addons/mrp/mrp.py
1
70571
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, orm from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT from openerp.tools import float_compare from openerp.tools.translate import _ from openerp import tools, SUPERUSER_ID from openerp.addons.product import _common class mrp_property_group(osv.osv): """ Group of mrp properties. """ _name = 'mrp.property.group' _description = 'Property Group' _columns = { 'name': fields.char('Property Group', required=True), 'description': fields.text('Description'), } class mrp_property(osv.osv): """ Properties of mrp. 
""" _name = 'mrp.property' _description = 'Property' _columns = { 'name': fields.char('Name', required=True), 'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purpose only."), 'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True), 'description': fields.text('Description'), } _defaults = { 'composition': lambda *a: 'min', } #---------------------------------------------------------- # Work Centers #---------------------------------------------------------- # capacity_hour : capacity per hour. default: 1.0. # Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees) # unit_per_cycle : how many units are produced for one cycle class mrp_workcenter(osv.osv): _name = 'mrp.workcenter' _description = 'Work Center' _inherits = {'resource.resource':"resource_id"} _columns = { 'note': fields.text('Description', help="Description of the Work Center. Explain here what's a cycle according to this Work Center."), 'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. 
If this Work Center represents a team of 5 workers, the capacity per cycle is 5."), 'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."), 'time_start': fields.float('Time before prod.', help="Time in hours for the setup."), 'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."), 'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."), 'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')], help="Fill this only if you want automatic analytic accounting entries on production orders."), 'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."), 'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')], help="Fill this only if you want automatic analytic accounting entries on production orders."), 'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'), 'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]), 'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True), 'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."), } _defaults = { 'capacity_per_cycle': 1.0, 'resource_type': 'material', } def on_change_product_cost(self, cr, uid, ids, product_id, context=None): value = {} if product_id: cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context) value = {'costs_hour': cost.standard_price} return {'value': value} class mrp_routing(osv.osv): """ For specifying the routings of Work Centers. 
""" _name = 'mrp.routing' _description = 'Routing' _columns = { 'name': fields.char('Name', required=True), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."), 'code': fields.char('Code', size=8), 'note': fields.text('Description'), 'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers', copy=True), 'location_id': fields.many2one('stock.location', 'Production Location', help="Keep empty if you produce at the location where the finished products are needed." \ "Set a location if you produce at a fixed location. This can be a partner location " \ "if you subcontract the manufacturing operations." ), 'company_id': fields.many2one('res.company', 'Company'), } _defaults = { 'active': lambda *a: 1, 'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context) } class mrp_routing_workcenter(osv.osv): """ Defines working cycles and hours of a Work Center using routings. """ _name = 'mrp.routing.workcenter' _description = 'Work Center Usage' _order = 'sequence' _columns = { 'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True), 'name': fields.char('Name', required=True), 'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."), 'cycle_nbr': fields.float('Number of Cycles', required=True, help="Number of iterations this work center has to do in the specified operation of the routing."), 'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."), 'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade', help="Routing indicates all the Work Centers used, for how long and/or cycles." 
\ "If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."), 'note': fields.text('Description'), 'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True), } _defaults = { 'cycle_nbr': lambda *a: 1.0, 'hour_nbr': lambda *a: 0.0, } class mrp_bom(osv.osv): """ Defines bills of material for a product. """ _name = 'mrp.bom' _description = 'Bill of Material' _inherit = ['mail.thread'] _columns = { 'name': fields.char('Name'), 'code': fields.char('Reference', size=16), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."), 'type': fields.selection([('normal','Manufacture this product'),('phantom','Ship this product as a set of components (kit)')], 'BoM Type', required=True, help= "Set: When processing a sales order for this product, the delivery order will contain the raw materials, instead of the finished product."), 'position': fields.char('Internal Reference', help="Reference to a position in an external plan."), 'product_tmpl_id': fields.many2one('product.template', 'Product', domain="[('type', '!=', 'service')]", required=True), 'product_id': fields.many2one('product.product', 'Product Variant', domain="['&', ('product_tmpl_id','=',product_tmpl_id), ('type','!=', 'service')]", help="If a product variant is defined the BOM is available only for this product."), 'bom_line_ids': fields.one2many('mrp.bom.line', 'bom_id', 'BoM Lines', copy=True), 'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"), 'date_start': fields.date('Valid From', help="Validity of this BoM. 
Keep empty if it's always valid."), 'date_stop': fields.date('Valid Until', help="Validity of this BoM. Keep empty if it's always valid."), 'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."), 'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. "\ "The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."), 'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."), 'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% during the production process."), 'property_ids': fields.many2many('mrp.property', string='Properties'), 'company_id': fields.many2one('res.company', 'Company', required=True), } def _get_uom_id(self, cr, uid, *args): return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0] _defaults = { 'active': lambda *a: 1, 'product_qty': lambda *a: 1.0, 'product_efficiency': lambda *a: 1.0, 'product_rounding': lambda *a: 0.0, 'type': lambda *a: 'normal', 'product_uom': _get_uom_id, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c), } _order = "sequence" def _bom_find(self, cr, uid, product_tmpl_id=None, product_id=None, properties=None, context=None): """ Finds BoM for particular product and product uom. @param product_tmpl_id: Selected product. @param product_uom: Unit of measure of a product. @param properties: List of related properties. @return: False or BoM id. 
""" if properties is None: properties = [] if product_id: if not product_tmpl_id: product_tmpl_id = self.pool['product.product'].browse(cr, uid, product_id, context=context).product_tmpl_id.id domain = [ '|', ('product_id', '=', product_id), '&', ('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id) ] elif product_tmpl_id: domain = [('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id)] else: # neither product nor template, makes no sense to search return False domain = domain + [ '|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), '|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))] # order to prioritize bom with product_id over the one without ids = self.search(cr, uid, domain, order='product_id', context=context) # Search a BoM which has all properties specified, or if you can not find one, you could # pass a BoM without any properties bom_empty_prop = False for bom in self.pool.get('mrp.bom').browse(cr, uid, ids, context=context): if not set(map(int, bom.property_ids or [])) - set(properties or []): if properties and not bom.property_ids: bom_empty_prop = bom.id else: return bom.id return bom_empty_prop def _bom_explode(self, cr, uid, bom, product, factor, properties=None, level=0, routing_id=False, previous_products=None, master_bom=None, context=None): """ Finds Products and Work Centers for related BoM for manufacturing order. @param bom: BoM of particular product template. @param product: Select a particular variant of the BoM. If False use BoM without variants. @param factor: Factor represents the quantity, but in UoM of the BoM, taking into account the numbers produced by the BoM @param properties: A List of properties Ids. @param level: Depth level to find BoM lines starts from 10. 
@param previous_products: List of product previously use by bom explore to avoid recursion @param master_bom: When recursion, used to display the name of the master bom @return: result: List of dictionaries containing product details. result2: List of dictionaries containing Work Center details. """ uom_obj = self.pool.get("product.uom") routing_obj = self.pool.get('mrp.routing') master_bom = master_bom or bom def _factor(factor, product_efficiency, product_rounding): factor = factor / (product_efficiency or 1.0) factor = _common.ceiling(factor, product_rounding) if factor < product_rounding: factor = product_rounding return factor factor = _factor(factor, bom.product_efficiency, bom.product_rounding) result = [] result2 = [] routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False if routing: for wc_use in routing.workcenter_lines: wc = wc_use.workcenter_id d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle) mult = (d + (m and 1.0 or 0.0)) cycle = mult * wc_use.cycle_nbr result2.append({ 'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_tmpl_id.name_get()[0][1]), 'workcenter_id': wc.id, 'sequence': level + (wc_use.sequence or 0), 'cycle': cycle, 'hour': float(wc_use.hour_nbr * mult + ((wc.time_start or 0.0) + (wc.time_stop or 0.0) + cycle * (wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)), }) for bom_line_id in bom.bom_line_ids: if bom_line_id.date_start and bom_line_id.date_start > time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or \ bom_line_id.date_stop and bom_line_id.date_stop < time.strftime(DEFAULT_SERVER_DATETIME_FORMAT): continue # all bom_line_id variant values must be in the product if bom_line_id.attribute_value_ids: if not product or (set(map(int,bom_line_id.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))): continue if previous_products and bom_line_id.product_id.product_tmpl_id.id in previous_products: raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" 
contains a BoM line with a product recursion: "%s".') % (master_bom.name,bom_line_id.product_id.name_get()[0][1])) quantity = _factor(bom_line_id.product_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding) bom_id = self._bom_find(cr, uid, product_id=bom_line_id.product_id.id, properties=properties, context=context) #If BoM should not behave like PhantoM, just add the product, otherwise explode further if bom_line_id.type != "phantom" and (not bom_id or self.browse(cr, uid, bom_id, context=context).type != "phantom"): result.append({ 'name': bom_line_id.product_id.name, 'product_id': bom_line_id.product_id.id, 'product_qty': quantity, 'product_uom': bom_line_id.product_uom.id, 'product_uos_qty': bom_line_id.product_uos and _factor(bom_line_id.product_uos_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding) or False, 'product_uos': bom_line_id.product_uos and bom_line_id.product_uos.id or False, }) elif bom_id: all_prod = [bom.product_tmpl_id.id] + (previous_products or []) bom2 = self.browse(cr, uid, bom_id, context=context) # We need to convert to units/UoM of chosen BoM factor2 = uom_obj._compute_qty(cr, uid, bom_line_id.product_uom.id, quantity, bom2.product_uom.id) quantity2 = factor2 / bom2.product_qty res = self._bom_explode(cr, uid, bom2, bom_line_id.product_id, quantity2, properties=properties, level=level + 10, previous_products=all_prod, master_bom=master_bom, context=context) result = result + res[0] result2 = result2 + res[1] else: raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a phantom BoM line but the product "%s" does not have any BoM defined.') % (master_bom.name,bom_line_id.product_id.name_get()[0][1])) return result, result2 def copy_data(self, cr, uid, id, default=None, context=None): if default is None: default = {} bom_data = self.read(cr, uid, id, [], context=context) default.update(name=_("%s (copy)") % (bom_data['name'])) return super(mrp_bom, self).copy_data(cr, uid, id, 
default, context=context) def onchange_uom(self, cr, uid, ids, product_tmpl_id, product_uom, context=None): res = {'value': {}} if not product_uom or not product_tmpl_id: return res product = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context) uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context) if uom.category_id.id != product.uom_id.category_id.id: res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')} res['value'].update({'product_uom': product.uom_id.id}) return res def unlink(self, cr, uid, ids, context=None): if self.pool['mrp.production'].search(cr, uid, [('bom_id', 'in', ids), ('state', 'not in', ['done', 'cancel'])], context=context): raise osv.except_osv(_('Warning!'), _('You can not delete a Bill of Material with running manufacturing orders.\nPlease close or cancel it first.')) return super(mrp_bom, self).unlink(cr, uid, ids, context=context) def onchange_product_tmpl_id(self, cr, uid, ids, product_tmpl_id, product_qty=0, context=None): """ Changes UoM and name if product_id changes. 
        @param product_id: Changed product_id
        @return:  Dictionary of changed values
        """
        res = {}
        if product_tmpl_id:
            prod = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
            res['value'] = {
                'name': prod.name,
                'product_uom': prod.uom_id.id,
            }
        return res

class mrp_bom_line(osv.osv):
    """ One component line of a bill of material (mrp.bom). """
    _name = 'mrp.bom.line'
    _order = "sequence"

    def _get_child_bom_lines(self, cr, uid, ids, field_name, arg, context=None):
        """If the BOM line refers to a BOM, return the ids of the child BOM lines"""
        bom_obj = self.pool['mrp.bom']
        res = {}
        for bom_line in self.browse(cr, uid, ids, context=context):
            bom_id = bom_obj._bom_find(cr, uid,
                product_tmpl_id=bom_line.product_id.product_tmpl_id.id,
                product_id=bom_line.product_id.id, context=context)
            if bom_id:
                child_bom = bom_obj.browse(cr, uid, bom_id, context=context)
                res[bom_line.id] = [x.id for x in child_bom.bom_line_ids]
            else:
                res[bom_line.id] = False
        return res

    _columns = {
        'type': fields.selection([('normal', 'Normal'), ('phantom', 'Phantom')], 'BoM Line Type', required=True,
                help="Phantom: this product line will not appear in the raw materials of manufacturing orders,"
                     "it will be directly replaced by the raw materials of its own BoM, without triggering"
                     "an extra manufacturing order."),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_uos_qty': fields.float('Product UOS Qty'),
        'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
        'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True,
            help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
        'date_start': fields.date('Valid From', help="Validity of component. Keep empty if it's always valid."),
        'date_stop': fields.date('Valid Until', help="Validity of component. Keep empty if it's always valid."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying."),
        'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
        'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
        'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
        'property_ids': fields.many2many('mrp.property', string='Properties'), #Not used
        'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True, required=True),
        # Line applies only when the produced variant carries all these attribute values.
        'attribute_value_ids': fields.many2many('product.attribute.value', string='Variants', help="BOM Product Variants needed form apply this line."),
        'child_line_ids': fields.function(_get_child_bom_lines, relation="mrp.bom.line", string="BOM lines of the referred bom", type="one2many")
    }

    def _get_uom_id(self, cr, uid, *args):
        # Fallback default UoM: the first UoM record by id.
        return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]

    _defaults = {
        'product_qty': lambda *a: 1.0,
        'product_efficiency': lambda *a: 1.0,
        'product_rounding': lambda *a: 0.0,
        'type': lambda *a: 'normal',
        'product_uom': _get_uom_id,
    }
    _sql_constraints = [
        ('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
            'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
    ]

    def create(self, cr, uid, values, context=None):
        # Default the line's UoM from the product when not given explicitly.
        if context is None:
            context = {}
        product_obj = self.pool.get('product.product')
        if 'product_id' in values and not 'product_uom' in values:
            values['product_uom'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
        return super(mrp_bom_line, self).create(cr, uid, values, context=context)

    def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
        # Warn and revert to the product's UoM when a UoM of a different
        # category is selected.
        res = {'value': {}}
        if not product_uom or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res

    def onchange_product_id(self, cr, uid, ids, product_id, product_qty=0, context=None):
        """ Changes UoM if product_id changes.
        @param product_id: Changed product_id
        @return: Dictionary of changed values
        """
        res = {}
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['value'] = {
                'product_uom': prod.uom_id.id,
                'product_uos_qty': 0,
                'product_uos': False
            }
            if prod.uos_id.id:
                res['value']['product_uos_qty'] = product_qty * prod.uos_coeff
                res['value']['product_uos'] = prod.uos_id.id
        return res

class mrp_production(osv.osv):
    """ Production Orders / Manufacturing Orders """
    _name = 'mrp.production'
    _description = 'Manufacturing Order'
    _date_name = 'date_planned'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
        """ Calculates total hours and total no. of cycles for a production order.
        @param prop: Name of field.
        @param unknow_none:
        @return: Dictionary of values.
        """
        result = {}
        for prod in self.browse(cr, uid, ids, context=context):
            result[prod.id] = {
                'hour_total': 0.0,
                'cycle_total': 0.0,
            }
            # Sum hours and cycles over all work order lines.
            for wc in prod.workcenter_lines:
                result[prod.id]['hour_total'] += wc.hour
                result[prod.id]['cycle_total'] += wc.cycle
        return result

    def _src_id_default(self, cr, uid, ids, context=None):
        # Default raw-material location: the demo/stock "Stock" location,
        # if the user may read it; otherwise False.
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id

    def _dest_id_default(self, cr, uid, ids, context=None):
        # Default finished-product location; same lookup as _src_id_default.
        try:
            location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
            self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
        except (orm.except_orm, ValueError):
            location_id = False
        return location_id

    def _get_progress(self, cr, uid, ids, name, arg, context=None):
        """ Return product quantity percentage """
        result = dict.fromkeys(ids, 100)
        for mrp_production in self.browse(cr, uid, ids, context=context):
            if mrp_production.product_qty:
                done = 0.0
                # Only count non-scrapped moves of the ordered product.
                for move in mrp_production.move_created_ids2:
                    if not move.scrapped and move.product_id == mrp_production.product_id:
                        done += move.product_qty
                result[mrp_production.id] = done / mrp_production.product_qty * 100
        return result

    def _moves_assigned(self, cr, uid, ids, name, arg, context=None):
        """ Test whether all the consume lines are assigned """
        res = {}
        for production in self.browse(cr, uid, ids, context=context):
            res[production.id] = True
            states = [x.state != 'assigned' for x in production.move_lines if x]
            if any(states) or len(states) == 0: #When no moves, ready_production will be False, but test_ready will pass
                res[production.id] = False
        return res

    def _mrp_from_move(self, cr, uid, ids, context=None):
        """ Return mrp"""
        # store trigger: map changed stock.move ids to the productions consuming them.
        res = []
        for move in self.browse(cr,
                                uid, ids, context=context):
            res += self.pool.get("mrp.production").search(cr, uid, [('move_lines', 'in', move.id)], context=context)
        return res

    _columns = {
        'name': fields.char('Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'origin': fields.char('Source Document', readonly=True, states={'draft': [('readonly', False)]},
            help="Reference of the document that generated this production order request.", copy=False),
        'priority': fields.selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
            select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
        'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]}, domain=[('type','!=','service')]),
        'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
        'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
        'progress': fields.function(_get_progress, type='float', string='Production progress'),
        'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
            readonly=True, states={'draft': [('readonly', False)]},
            help="Location where the system will look for components."),
        'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
            readonly=True, states={'draft': [('readonly', False)]},
            help="Location where the system will stock the finished products."),
        'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
        'date_start': fields.datetime('Start Date', select=True, readonly=True, copy=False),
        'date_finished': fields.datetime('End Date', select=True, readonly=True, copy=False),
        'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True, states={'draft': [('readonly', False)]},
            help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
        'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft': [('readonly', False)]},
            help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production plannification."),
        'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True, copy=False),
        # Pending vs. done moves are split by the domain on the same inverse field.
        'move_lines': fields.one2many('stock.move', 'raw_material_production_id', 'Products to Consume',
            domain=[('state', 'not in', ('done', 'cancel'))], readonly=True, states={'draft': [('readonly', False)]}),
        'move_lines2': fields.one2many('stock.move', 'raw_material_production_id', 'Consumed Products',
            domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
        'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
            domain=[('state', 'not in', ('done', 'cancel'))], readonly=True),
        'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
            domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
        'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods', readonly=True),
        'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
            readonly=True, states={'draft': [('readonly', False)]}),
        'state': fields.selection(
            [('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'),
                ('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
            string='Status', readonly=True,
            track_visibility='onchange', copy=False,
            help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
        'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store=True),
        'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store=True),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        # Recomputed whenever one of the consume moves changes state (see _mrp_from_move).
        'ready_production': fields.function(_moves_assigned, type='boolean', string="Ready for production",
            store={'stock.move': (_mrp_from_move, ['state'], 10)}),
    }

    _defaults = {
        'priority': lambda *a: '1',
        'state': lambda *a: 'draft',
        'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
        'product_qty': lambda *a: 1.0,
        'user_id': lambda self, cr, uid, c: uid,
        'name': lambda x, y, z, c: x.pool.get('ir.sequence').next_by_code(y, z, 'mrp.production') or '/',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
        'location_src_id': _src_id_default,
        'location_dest_id': _dest_id_default
    }

    _sql_constraints = [
        ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
    ]

    _order = 'priority desc, date_planned asc'

    def _check_qty(self, cr, uid, ids, context=None):
        # Constraint: the ordered quantity must be strictly positive.
        for order in self.browse(cr, uid, ids, context=context):
            if order.product_qty <= 0:
                return False
        return True

    _constraints = [
        (_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
    ]

    def create(self, cr, uid, values, context=None):
        # Default the order's UoM from the product when not given explicitly.
        if context is None:
            context = {}
        product_obj = self.pool.get('product.product')
        if 'product_id' in values and not 'product_uom' in values:
            values['product_uom'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
        return super(mrp_production, self).create(cr, uid, values, context=context)

    def unlink(self, cr, uid, ids, context=None):
        # Only draft or cancelled orders may be deleted.
        for production in self.browse(cr, uid, ids, context=context):
            if production.state not in ('draft', 'cancel'):
                raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
        return super(mrp_production, self).unlink(cr, uid, ids, context=context)

    def location_id_change(self, cr, uid, ids, src, dest, context=None):
        """ Changes destination location if source location is changed.
        @param src: Source location id.
        @param dest: Destination location id.
        @return: Dictionary of values.
        """
        if dest:
            return {}
        if src:
            return {'value': {'location_dest_id': src}}
        return {}

    def product_id_change(self, cr, uid, ids, product_id, product_qty=0, context=None):
        """ Finds UoM of changed product.
        @param product_id: Id of changed product.
        @return: Dictionary of values.
""" result = {} if not product_id: return {'value': { 'product_uom': False, 'bom_id': False, 'routing_id': False, 'product_uos_qty': 0, 'product_uos': False }} bom_obj = self.pool.get('mrp.bom') product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) bom_id = bom_obj._bom_find(cr, uid, product_id=product.id, properties=[], context=context) routing_id = False if bom_id: bom_point = bom_obj.browse(cr, uid, bom_id, context=context) routing_id = bom_point.routing_id.id or False product_uom_id = product.uom_id and product.uom_id.id or False result['value'] = {'product_uos_qty': 0, 'product_uos': False, 'product_uom': product_uom_id, 'bom_id': bom_id, 'routing_id': routing_id} if product.uos_id.id: result['value']['product_uos_qty'] = product_qty * product.uos_coeff result['value']['product_uos'] = product.uos_id.id return result def bom_id_change(self, cr, uid, ids, bom_id, context=None): """ Finds routing for changed BoM. @param product: Id of product. @return: Dictionary of values. 
""" if not bom_id: return {'value': { 'routing_id': False }} bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context) routing_id = bom_point.routing_id.id or False result = { 'routing_id': routing_id } return {'value': result} def _action_compute_lines(self, cr, uid, ids, properties=None, context=None): """ Compute product_lines and workcenter_lines from BoM structure @return: product_lines """ if properties is None: properties = [] results = [] bom_obj = self.pool.get('mrp.bom') uom_obj = self.pool.get('product.uom') prod_line_obj = self.pool.get('mrp.production.product.line') workcenter_line_obj = self.pool.get('mrp.production.workcenter.line') for production in self.browse(cr, uid, ids, context=context): #unlink product_lines prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context) #unlink workcenter_lines workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context) # search BoM structure and route bom_point = production.bom_id bom_id = production.bom_id.id if not bom_point: bom_id = bom_obj._bom_find(cr, uid, product_id=production.product_id.id, properties=properties, context=context) if bom_id: bom_point = bom_obj.browse(cr, uid, bom_id) routing_id = bom_point.routing_id.id or False self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id}) if not bom_id: raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product.")) # get components and workcenter_lines from BoM structure factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id) # product_lines, workcenter_lines results, results2 = bom_obj._bom_explode(cr, uid, bom_point, production.product_id, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id, context=context) # reset product_lines in production order for line in results: line['production_id'] = 
production.id prod_line_obj.create(cr, uid, line) #reset workcenter_lines in production order for line in results2: line['production_id'] = production.id workcenter_line_obj.create(cr, uid, line) return results def action_compute(self, cr, uid, ids, properties=None, context=None): """ Computes bills of material of a product. @param properties: List containing dictionaries of properties. @return: No. of products. """ return len(self._action_compute_lines(cr, uid, ids, properties=properties, context=context)) def action_cancel(self, cr, uid, ids, context=None): """ Cancels the production order and related stock moves. @return: True """ if context is None: context = {} move_obj = self.pool.get('stock.move') proc_obj = self.pool.get('procurement.order') for production in self.browse(cr, uid, ids, context=context): if production.move_created_ids: move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids]) procs = proc_obj.search(cr, uid, [('move_dest_id', 'in', [x.id for x in production.move_lines])], context=context) if procs: proc_obj.cancel(cr, uid, procs, context=context) move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines]) self.write(cr, uid, ids, {'state': 'cancel'}) # Put related procurements in exception proc_obj = self.pool.get("procurement.order") procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context) if procs: proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context) return True def action_ready(self, cr, uid, ids, context=None): """ Changes the production state to Ready and location id of stock move. 
@return: True """ move_obj = self.pool.get('stock.move') self.write(cr, uid, ids, {'state': 'ready'}) for production in self.browse(cr, uid, ids, context=context): if not production.move_created_ids: self._make_production_produce_line(cr, uid, production, context=context) if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id: move_obj.write(cr, uid, [production.move_prod_id.id], {'location_id': production.location_dest_id.id}) return True def action_production_end(self, cr, uid, ids, context=None): """ Changes production state to Finish and writes finished date. @return: True """ for production in self.browse(cr, uid, ids): self._costs_generate(cr, uid, production) write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')}) # Check related procurements proc_obj = self.pool.get("procurement.order") procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context) proc_obj.check(cr, uid, procs, context=context) return write_res def test_production_done(self, cr, uid, ids): """ Tests whether production is done or not. @return: True or False """ res = True for production in self.browse(cr, uid, ids): if production.move_lines: res = False if production.move_created_ids: res = False return res def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None): """ Compute the factor to compute the qty of procucts to produce for the given production_id. By default, it's always equal to the quantity encoded in the production order or the production wizard, but if the module mrp_subproduct is installed, then we must use the move_id to identify the product to produce and its quantity. :param production_id: ID of the mrp.order :param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct. :return: The factor to apply to the quantity that we should produce for the given production order. 
""" return 1 def _get_produced_qty(self, cr, uid, production, context=None): ''' returns the produced quantity of product 'production.product_id' for the given production, in the product UoM ''' produced_qty = 0 for produced_product in production.move_created_ids2: if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id): continue produced_qty += produced_product.product_qty return produced_qty def _get_consumed_data(self, cr, uid, production, context=None): ''' returns a dictionary containing for each raw material of the given production, its quantity already consumed (in the raw material UoM) ''' consumed_data = {} # Calculate already consumed qtys for consumed in production.move_lines2: if consumed.scrapped: continue if not consumed_data.get(consumed.product_id.id, False): consumed_data[consumed.product_id.id] = 0 consumed_data[consumed.product_id.id] += consumed.product_qty return consumed_data def _calculate_qty(self, cr, uid, production, product_qty=0.0, context=None): """ Calculates the quantity still needed to produce an extra number of products product_qty is in the uom of the product """ quant_obj = self.pool.get("stock.quant") uom_obj = self.pool.get("product.uom") produced_qty = self._get_produced_qty(cr, uid, production, context=context) consumed_data = self._get_consumed_data(cr, uid, production, context=context) #In case no product_qty is given, take the remaining qty to produce for the given production if not product_qty: product_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id) - produced_qty production_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id) scheduled_qty = {} for scheduled in production.product_lines: if scheduled.product_id.type == 'service': continue qty = uom_obj._compute_qty(cr, uid, scheduled.product_uom.id, scheduled.product_qty, 
scheduled.product_id.uom_id.id) if scheduled_qty.get(scheduled.product_id.id): scheduled_qty[scheduled.product_id.id] += qty else: scheduled_qty[scheduled.product_id.id] = qty dicts = {} # Find product qty to be consumed and consume it for product_id in scheduled_qty.keys(): consumed_qty = consumed_data.get(product_id, 0.0) # qty available for consume and produce sched_product_qty = scheduled_qty[product_id] qty_avail = sched_product_qty - consumed_qty if qty_avail <= 0.0: # there will be nothing to consume for this raw material continue if not dicts.get(product_id): dicts[product_id] = {} # total qty of consumed product we need after this consumption if product_qty + produced_qty <= production_qty: total_consume = ((product_qty + produced_qty) * sched_product_qty / production_qty) else: total_consume = sched_product_qty qty = total_consume - consumed_qty # Search for quants related to this related move for move in production.move_lines: if qty <= 0.0: break if move.product_id.id != product_id: continue q = min(move.product_qty, qty) quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, q, domain=[('qty', '>', 0.0)], prefered_domain_list=[[('reservation_id', '=', move.id)]], context=context) for quant, quant_qty in quants: if quant: lot_id = quant.lot_id.id if not product_id in dicts.keys(): dicts[product_id] = {lot_id: quant_qty} elif lot_id in dicts[product_id].keys(): dicts[product_id][lot_id] += quant_qty else: dicts[product_id][lot_id] = quant_qty qty -= quant_qty if qty > 0: if dicts[product_id].get(False): dicts[product_id][False] += qty else: dicts[product_id][False] = qty consume_lines = [] for prod in dicts.keys(): for lot, qty in dicts[prod].items(): consume_lines.append({'product_id': prod, 'product_qty': qty, 'lot_id': lot}) return consume_lines def action_produce(self, cr, uid, production_id, production_qty, production_mode, wiz=False, context=None): """ To produce final product based on production mode 
(consume/consume&produce). If Production mode is consume, all stock move lines of raw materials will be done/consumed. If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed and stock move lines of final product will be also done/produced. @param production_id: the ID of mrp.production object @param production_qty: specify qty to produce in the uom of the production order @param production_mode: specify production mode (consume/consume&produce). @param wiz: the mrp produce product wizard, which will tell the amount of consumed products needed @return: True """ stock_mov_obj = self.pool.get('stock.move') uom_obj = self.pool.get("product.uom") production = self.browse(cr, uid, production_id, context=context) production_qty_uom = uom_obj._compute_qty(cr, uid, production.product_uom.id, production_qty, production.product_id.uom_id.id) main_production_move = False if production_mode == 'consume_produce': # To produce remaining qty of final product produced_products = {} for produced_product in production.move_created_ids2: if produced_product.scrapped: continue if not produced_products.get(produced_product.product_id.id, False): produced_products[produced_product.product_id.id] = 0 produced_products[produced_product.product_id.id] += produced_product.product_qty for produce_product in production.move_created_ids: subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context) lot_id = False if wiz: lot_id = wiz.lot_id.id qty = min(subproduct_factor * production_qty_uom, produce_product.product_qty) #Needed when producing more than maximum quantity new_moves = stock_mov_obj.action_consume(cr, uid, [produce_product.id], qty, location_id=produce_product.location_id.id, restrict_lot_id=lot_id, context=context) stock_mov_obj.write(cr, uid, new_moves, {'production_id': production_id}, context=context) remaining_qty = subproduct_factor * production_qty_uom - qty if remaining_qty: # In 
case you need to make more than planned #consumed more in wizard than previously planned extra_move_id = stock_mov_obj.copy(cr, uid, produce_product.id, default={'state': 'confirmed', 'product_uom_qty': remaining_qty, 'production_id': production_id}, context=context) if extra_move_id: stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context) if produce_product.product_id.id == production.product_id.id: main_production_move = produce_product.id if production_mode in ['consume', 'consume_produce']: if wiz: consume_lines = [] for cons in wiz.consume_lines: consume_lines.append({'product_id': cons.product_id.id, 'lot_id': cons.lot_id.id, 'product_qty': cons.product_qty}) else: consume_lines = self._calculate_qty(cr, uid, production, production_qty_uom, context=context) for consume in consume_lines: remaining_qty = consume['product_qty'] for raw_material_line in production.move_lines: if remaining_qty <= 0: break if consume['product_id'] != raw_material_line.product_id.id: continue consumed_qty = min(remaining_qty, raw_material_line.product_qty) stock_mov_obj.action_consume(cr, uid, [raw_material_line.id], consumed_qty, raw_material_line.location_id.id, restrict_lot_id=consume['lot_id'], consumed_for=main_production_move, context=context) remaining_qty -= consumed_qty if remaining_qty: #consumed more in wizard than previously planned product = self.pool.get('product.product').browse(cr, uid, consume['product_id'], context=context) extra_move_id = self._make_consume_line_from_data(cr, uid, production, product, product.uom_id.id, remaining_qty, False, 0, context=context) if extra_move_id: if consume['lot_id']: stock_mov_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': consume['lot_id']}, context=context) stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context) self.message_post(cr, uid, production_id, body=_("%s produced") % self._description, context=context) self.signal_workflow(cr, uid, [production_id], 'button_produce_done') return True 
def _costs_generate(self, cr, uid, production): """ Calculates total costs at the end of the production. @param production: Id of production order. @return: Calculated amount. """ amount = 0.0 analytic_line_obj = self.pool.get('account.analytic.line') for wc_line in production.workcenter_lines: wc = wc_line.workcenter_id if wc.costs_journal_id and wc.costs_general_account_id: # Cost per hour value = wc_line.hour * wc.costs_hour account = wc.costs_hour_account_id.id if value and account: amount += value # we user SUPERUSER_ID as we do not garantee an mrp user # has access to account analytic lines but still should be # able to produce orders analytic_line_obj.create(cr, SUPERUSER_ID, { 'name': wc_line.name + ' (H)', 'amount': value, 'account_id': account, 'general_account_id': wc.costs_general_account_id.id, 'journal_id': wc.costs_journal_id.id, 'ref': wc.code, 'product_id': wc.product_id.id, 'unit_amount': wc_line.hour, 'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False }) # Cost per cycle value = wc_line.cycle * wc.costs_cycle account = wc.costs_cycle_account_id.id if value and account: amount += value analytic_line_obj.create(cr, SUPERUSER_ID, { 'name': wc_line.name + ' (C)', 'amount': value, 'account_id': account, 'general_account_id': wc.costs_general_account_id.id, 'journal_id': wc.costs_journal_id.id, 'ref': wc.code, 'product_id': wc.product_id.id, 'unit_amount': wc_line.cycle, 'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False }) return amount def action_in_production(self, cr, uid, ids, context=None): """ Changes state to In Production and writes starting date. 
@return: True """ return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}) def consume_lines_get(self, cr, uid, ids, *args): res = [] for order in self.browse(cr, uid, ids, context={}): res += [x.id for x in order.move_lines] return res def test_ready(self, cr, uid, ids): res = True for production in self.browse(cr, uid, ids): if production.move_lines and not production.ready_production: res = False return res def _make_production_produce_line(self, cr, uid, production, context=None): stock_move = self.pool.get('stock.move') proc_obj = self.pool.get('procurement.order') source_location_id = production.product_id.property_stock_production.id destination_location_id = production.location_dest_id.id procs = proc_obj.search(cr, uid, [('production_id', '=', production.id)], context=context) procurement = procs and\ proc_obj.browse(cr, uid, procs[0], context=context) or False data = { 'name': production.name, 'date': production.date_planned, 'product_id': production.product_id.id, 'product_uom': production.product_uom.id, 'product_uom_qty': production.product_qty, 'product_uos_qty': production.product_uos and production.product_uos_qty or False, 'product_uos': production.product_uos and production.product_uos.id or False, 'location_id': source_location_id, 'location_dest_id': destination_location_id, 'move_dest_id': production.move_prod_id.id, 'procurement_id': procurement and procurement.id, 'company_id': production.company_id.id, 'production_id': production.id, 'origin': production.name, 'group_id': procurement and procurement.group_id.id, } move_id = stock_move.create(cr, uid, data, context=context) #a phantom bom cannot be used in mrp order so it's ok to assume the list returned by action_confirm #is 1 element long, so we can take the first. 
return stock_move.action_confirm(cr, uid, [move_id], context=context)[0] def _get_raw_material_procure_method(self, cr, uid, product, context=None): '''This method returns the procure_method to use when creating the stock move for the production raw materials''' warehouse_obj = self.pool['stock.warehouse'] try: mto_route = warehouse_obj._get_mto_route(cr, uid, context=context) except: return "make_to_stock" routes = product.route_ids + product.categ_id.total_route_ids if mto_route in [x.id for x in routes]: return "make_to_order" return "make_to_stock" def _create_previous_move(self, cr, uid, move_id, product, source_location_id, dest_location_id, context=None): ''' When the routing gives a different location than the raw material location of the production order, we should create an extra move from the raw material location to the location of the routing, which precedes the consumption line (chained). The picking type depends on the warehouse in which this happens and the type of locations. 
''' loc_obj = self.pool.get("stock.location") stock_move = self.pool.get('stock.move') type_obj = self.pool.get('stock.picking.type') # Need to search for a picking type move = stock_move.browse(cr, uid, move_id, context=context) src_loc = loc_obj.browse(cr, uid, source_location_id, context=context) dest_loc = loc_obj.browse(cr, uid, dest_location_id, context=context) code = stock_move.get_code_from_locs(cr, uid, move, src_loc, dest_loc, context=context) if code == 'outgoing': check_loc = src_loc else: check_loc = dest_loc wh = loc_obj.get_warehouse(cr, uid, check_loc, context=context) domain = [('code', '=', code)] if wh: domain += [('warehouse_id', '=', wh)] types = type_obj.search(cr, uid, domain, context=context) move = stock_move.copy(cr, uid, move_id, default = { 'location_id': source_location_id, 'location_dest_id': dest_location_id, 'procure_method': self._get_raw_material_procure_method(cr, uid, product, context=context), 'raw_material_production_id': False, 'move_dest_id': move_id, 'picking_type_id': types and types[0] or False, }, context=context) return move def _make_consume_line_from_data(self, cr, uid, production, product, uom_id, qty, uos_id, uos_qty, context=None): stock_move = self.pool.get('stock.move') loc_obj = self.pool.get('stock.location') # Internal shipment is created for Stockable and Consumer Products if product.type not in ('product', 'consu'): return False # Take routing location as a Source Location. 
source_location_id = production.location_src_id.id prod_location_id = source_location_id prev_move= False if production.bom_id.routing_id and production.bom_id.routing_id.location_id and production.bom_id.routing_id.location_id.id != source_location_id: source_location_id = production.bom_id.routing_id.location_id.id prev_move = True destination_location_id = production.product_id.property_stock_production.id move_id = stock_move.create(cr, uid, { 'name': production.name, 'date': production.date_planned, 'product_id': product.id, 'product_uom_qty': qty, 'product_uom': uom_id, 'product_uos_qty': uos_id and uos_qty or False, 'product_uos': uos_id or False, 'location_id': source_location_id, 'location_dest_id': destination_location_id, 'company_id': production.company_id.id, 'procure_method': prev_move and 'make_to_stock' or self._get_raw_material_procure_method(cr, uid, product, context=context), #Make_to_stock avoids creating procurement 'raw_material_production_id': production.id, #this saves us a browse in create() 'price_unit': product.standard_price, 'origin': production.name, 'warehouse_id': loc_obj.get_warehouse(cr, uid, production.location_src_id, context=context), 'group_id': production.move_prod_id.group_id.id, }, context=context) if prev_move: prev_move = self._create_previous_move(cr, uid, move_id, product, prod_location_id, source_location_id, context=context) stock_move.action_confirm(cr, uid, [prev_move], context=context) return move_id def _make_production_consume_line(self, cr, uid, line, context=None): return self._make_consume_line_from_data(cr, uid, line.production_id, line.product_id, line.product_uom.id, line.product_qty, line.product_uos.id, line.product_uos_qty, context=context) def _make_service_procurement(self, cr, uid, line, context=None): prod_obj = self.pool.get('product.product') if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context): vals = { 'name': line.production_id.name, 'origin': line.production_id.name, 
'company_id': line.production_id.company_id.id, 'date_planned': line.production_id.date_planned, 'product_id': line.product_id.id, 'product_qty': line.product_qty, 'product_uom': line.product_uom.id, 'product_uos_qty': line.product_uos_qty, 'product_uos': line.product_uos.id, } proc_obj = self.pool.get("procurement.order") proc = proc_obj.create(cr, uid, vals, context=context) proc_obj.run(cr, uid, [proc], context=context) def action_confirm(self, cr, uid, ids, context=None): """ Confirms production order. @return: Newly generated Shipment Id. """ uncompute_ids = filter(lambda x: x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)]) self.action_compute(cr, uid, uncompute_ids, context=context) for production in self.browse(cr, uid, ids, context=context): self._make_production_produce_line(cr, uid, production, context=context) stock_moves = [] for line in production.product_lines: if line.product_id.type != 'service': stock_move_id = self._make_production_consume_line(cr, uid, line, context=context) stock_moves.append(stock_move_id) else: self._make_service_procurement(cr, uid, line, context=context) if stock_moves: self.pool.get('stock.move').action_confirm(cr, uid, stock_moves, context=context) production.write({'state': 'confirmed'}) return 0 def action_assign(self, cr, uid, ids, context=None): """ Checks the availability on the consume lines of the production order """ from openerp import workflow move_obj = self.pool.get("stock.move") for production in self.browse(cr, uid, ids, context=context): move_obj.action_assign(cr, uid, [x.id for x in production.move_lines], context=context) if self.pool.get('mrp.production').test_ready(cr, uid, [production.id]): workflow.trg_validate(uid, 'mrp.production', production.id, 'moves_ready', cr) def force_production(self, cr, uid, ids, *args): """ Assigns products. 
@param *args: Arguments @return: True """ from openerp import workflow move_obj = self.pool.get('stock.move') for order in self.browse(cr, uid, ids): move_obj.force_assign(cr, uid, [x.id for x in order.move_lines]) if self.pool.get('mrp.production').test_ready(cr, uid, [order.id]): workflow.trg_validate(uid, 'mrp.production', order.id, 'moves_ready', cr) return True class mrp_production_workcenter_line(osv.osv): _name = 'mrp.production.workcenter.line' _description = 'Work Order' _order = 'sequence' _inherit = ['mail.thread'] _columns = { 'name': fields.char('Work Order', required=True), 'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True), 'cycle': fields.float('Number of Cycles', digits=(16, 2)), 'hour': fields.float('Number of Hours', digits=(16, 2)), 'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."), 'production_id': fields.many2one('mrp.production', 'Manufacturing Order', track_visibility='onchange', select=True, ondelete='cascade', required=True), } _defaults = { 'sequence': lambda *a: 1, 'hour': lambda *a: 0, 'cycle': lambda *a: 0, } class mrp_production_product_line(osv.osv): _name = 'mrp.production.product.line' _description = 'Production Scheduled Product' _columns = { 'name': fields.char('Name', required=True), 'product_id': fields.many2one('product.product', 'Product', required=True), 'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True), 'product_uos_qty': fields.float('Product UOS Quantity'), 'product_uos': fields.many2one('product.uom', 'Product UOS'), 'production_id': fields.many2one('mrp.production', 'Production Order', select=True), }
agpl-3.0
justajeffy/arsenalsuite
cpp/lib/PyQt4/examples/designer/calculatorform/ui_calculatorform.py
18
5739
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'calculatorform.ui' # # Created: Mon Jan 23 13:21:45 2006 # by: PyQt4 UI code generator vsnapshot-20060120 # # WARNING! All changes made in this file will be lost! import sys from PyQt4 import QtCore, QtGui class Ui_CalculatorForm(object): def setupUi(self, CalculatorForm): CalculatorForm.setObjectName("CalculatorForm") CalculatorForm.resize(QtCore.QSize(QtCore.QRect(0,0,400,300).size()).expandedTo(CalculatorForm.minimumSizeHint())) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Policy(5),QtGui.QSizePolicy.Policy(5)) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(CalculatorForm.sizePolicy().hasHeightForWidth()) CalculatorForm.setSizePolicy(sizePolicy) self.gridlayout = QtGui.QGridLayout(CalculatorForm) self.gridlayout.setMargin(9) self.gridlayout.setSpacing(6) self.gridlayout.setObjectName("gridlayout") spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum) self.gridlayout.addItem(spacerItem,0,6,1,1) self.label_3_2 = QtGui.QLabel(CalculatorForm) self.label_3_2.setGeometry(QtCore.QRect(169,9,20,52)) self.label_3_2.setAlignment(QtCore.Qt.AlignCenter) self.label_3_2.setObjectName("label_3_2") self.gridlayout.addWidget(self.label_3_2,0,4,1,1) self.vboxlayout = QtGui.QVBoxLayout() self.vboxlayout.setMargin(1) self.vboxlayout.setSpacing(6) self.vboxlayout.setObjectName("vboxlayout") self.label_2_2_2 = QtGui.QLabel(CalculatorForm) self.label_2_2_2.setGeometry(QtCore.QRect(1,1,36,17)) self.label_2_2_2.setObjectName("label_2_2_2") self.vboxlayout.addWidget(self.label_2_2_2) self.outputWidget = QtGui.QLabel(CalculatorForm) self.outputWidget.setGeometry(QtCore.QRect(1,24,36,27)) self.outputWidget.setFrameShape(QtGui.QFrame.Box) self.outputWidget.setFrameShadow(QtGui.QFrame.Sunken) 
self.outputWidget.setAlignment(QtCore.Qt.AlignAbsolute|QtCore.Qt.AlignBottom|QtCore.Qt.AlignCenter|QtCore.Qt.AlignHCenter|QtCore.Qt.AlignHorizontal_Mask|QtCore.Qt.AlignJustify|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignVertical_Mask) self.outputWidget.setObjectName("outputWidget") self.vboxlayout.addWidget(self.outputWidget) self.gridlayout.addLayout(self.vboxlayout,0,5,1,1) spacerItem1 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding) self.gridlayout.addItem(spacerItem1,1,2,1,1) self.vboxlayout1 = QtGui.QVBoxLayout() self.vboxlayout1.setMargin(1) self.vboxlayout1.setSpacing(6) self.vboxlayout1.setObjectName("vboxlayout1") self.label_2 = QtGui.QLabel(CalculatorForm) self.label_2.setGeometry(QtCore.QRect(1,1,46,19)) self.label_2.setObjectName("label_2") self.vboxlayout1.addWidget(self.label_2) self.inputSpinBox2 = QtGui.QSpinBox(CalculatorForm) self.inputSpinBox2.setGeometry(QtCore.QRect(1,26,46,25)) self.inputSpinBox2.setObjectName("inputSpinBox2") self.vboxlayout1.addWidget(self.inputSpinBox2) self.gridlayout.addLayout(self.vboxlayout1,0,3,1,1) self.label_3 = QtGui.QLabel(CalculatorForm) self.label_3.setGeometry(QtCore.QRect(63,9,20,52)) self.label_3.setAlignment(QtCore.Qt.AlignCenter) self.label_3.setObjectName("label_3") self.gridlayout.addWidget(self.label_3,0,1,1,1) self.vboxlayout2 = QtGui.QVBoxLayout() self.vboxlayout2.setMargin(1) self.vboxlayout2.setSpacing(6) self.vboxlayout2.setObjectName("vboxlayout2") self.label = QtGui.QLabel(CalculatorForm) self.label.setGeometry(QtCore.QRect(1,1,46,19)) self.label.setObjectName("label") self.vboxlayout2.addWidget(self.label) self.inputSpinBox1 = QtGui.QSpinBox(CalculatorForm) self.inputSpinBox1.setGeometry(QtCore.QRect(1,26,46,25)) self.inputSpinBox1.setObjectName("inputSpinBox1") self.vboxlayout2.addWidget(self.inputSpinBox1) self.gridlayout.addLayout(self.vboxlayout2,0,0,1,1) 
self.retranslateUi(CalculatorForm) QtCore.QMetaObject.connectSlotsByName(CalculatorForm) def tr(self, string): return QtGui.QApplication.translate("CalculatorForm", string, None, QtGui.QApplication.UnicodeUTF8) def retranslateUi(self, CalculatorForm): CalculatorForm.setObjectName(self.tr("CalculatorForm")) CalculatorForm.setWindowTitle(self.tr("Calculator Form")) self.label_3_2.setObjectName(self.tr("label_3_2")) self.label_3_2.setText(self.tr("=")) self.label_2_2_2.setObjectName(self.tr("label_2_2_2")) self.label_2_2_2.setText(self.tr("Output")) self.outputWidget.setObjectName(self.tr("outputWidget")) self.outputWidget.setText(self.tr("0")) self.label_2.setObjectName(self.tr("label_2")) self.label_2.setText(self.tr("Input 2")) self.inputSpinBox2.setObjectName(self.tr("inputSpinBox2")) self.label_3.setObjectName(self.tr("label_3")) self.label_3.setText(self.tr("+")) self.label.setObjectName(self.tr("label")) self.label.setText(self.tr("Input 1")) self.inputSpinBox1.setObjectName(self.tr("inputSpinBox1"))
gpl-2.0
isrohutamahopetechnik/MissionPlanner
Lib/encodings/cp1026.py
93
13676
""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1026', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x9c' # 0x04 -> CONTROL u'\t' # 0x05 -> HORIZONTAL TABULATION u'\x86' # 0x06 -> CONTROL u'\x7f' # 0x07 -> DELETE u'\x97' # 0x08 -> CONTROL u'\x8d' # 0x09 -> CONTROL u'\x8e' # 0x0A -> CONTROL u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x9d' # 0x14 -> CONTROL u'\x85' # 0x15 -> CONTROL u'\x08' # 0x16 -> BACKSPACE u'\x87' # 0x17 -> CONTROL u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x92' # 0x1A -> CONTROL u'\x8f' # 0x1B -> CONTROL u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR 
u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u'\x80' # 0x20 -> CONTROL u'\x81' # 0x21 -> CONTROL u'\x82' # 0x22 -> CONTROL u'\x83' # 0x23 -> CONTROL u'\x84' # 0x24 -> CONTROL u'\n' # 0x25 -> LINE FEED u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK u'\x1b' # 0x27 -> ESCAPE u'\x88' # 0x28 -> CONTROL u'\x89' # 0x29 -> CONTROL u'\x8a' # 0x2A -> CONTROL u'\x8b' # 0x2B -> CONTROL u'\x8c' # 0x2C -> CONTROL u'\x05' # 0x2D -> ENQUIRY u'\x06' # 0x2E -> ACKNOWLEDGE u'\x07' # 0x2F -> BELL u'\x90' # 0x30 -> CONTROL u'\x91' # 0x31 -> CONTROL u'\x16' # 0x32 -> SYNCHRONOUS IDLE u'\x93' # 0x33 -> CONTROL u'\x94' # 0x34 -> CONTROL u'\x95' # 0x35 -> CONTROL u'\x96' # 0x36 -> CONTROL u'\x04' # 0x37 -> END OF TRANSMISSION u'\x98' # 0x38 -> CONTROL u'\x99' # 0x39 -> CONTROL u'\x9a' # 0x3A -> CONTROL u'\x9b' # 0x3B -> CONTROL u'\x14' # 0x3C -> DEVICE CONTROL FOUR u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE u'\x9e' # 0x3E -> CONTROL u'\x1a' # 0x3F -> SUBSTITUTE u' ' # 0x40 -> SPACE u'\xa0' # 0x41 -> NO-BREAK SPACE u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE u'{' # 0x48 -> LEFT CURLY BRACKET u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA u'.' # 0x4B -> FULL STOP u'<' # 0x4C -> LESS-THAN SIGN u'(' # 0x4D -> LEFT PARENTHESIS u'+' # 0x4E -> PLUS SIGN u'!' 
# 0x4F -> EXCLAMATION MARK u'&' # 0x50 -> AMPERSAND u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE u'*' # 0x5C -> ASTERISK u')' # 0x5D -> RIGHT PARENTHESIS u';' # 0x5E -> SEMICOLON u'^' # 0x5F -> CIRCUMFLEX ACCENT u'-' # 0x60 -> HYPHEN-MINUS u'/' # 0x61 -> SOLIDUS u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'[' # 0x68 -> LEFT SQUARE BRACKET u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA u',' # 0x6B -> COMMA u'%' # 0x6C -> PERCENT SIGN u'_' # 0x6D -> LOW LINE u'>' # 0x6E -> GREATER-THAN SIGN u'?' 
# 0x6F -> QUESTION MARK u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I u':' # 0x7A -> COLON u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA u"'" # 0x7D -> APOSTROPHE u'=' # 0x7E -> EQUALS SIGN u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE u'a' # 0x81 -> LATIN SMALL LETTER A u'b' # 0x82 -> LATIN SMALL LETTER B u'c' # 0x83 -> LATIN SMALL LETTER C u'd' # 0x84 -> LATIN SMALL LETTER D u'e' # 0x85 -> LATIN SMALL LETTER E u'f' # 0x86 -> LATIN SMALL LETTER F u'g' # 0x87 -> LATIN SMALL LETTER G u'h' # 0x88 -> LATIN SMALL LETTER H u'i' # 0x89 -> LATIN SMALL LETTER I u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'}' # 0x8C -> RIGHT CURLY BRACKET u'`' # 0x8D -> GRAVE ACCENT u'\xa6' # 0x8E -> BROKEN BAR u'\xb1' # 0x8F -> PLUS-MINUS SIGN u'\xb0' # 0x90 -> DEGREE SIGN u'j' # 0x91 -> LATIN SMALL LETTER J u'k' # 0x92 -> LATIN SMALL LETTER K u'l' # 0x93 -> LATIN SMALL LETTER L u'm' # 0x94 -> LATIN SMALL LETTER M u'n' # 0x95 -> LATIN SMALL LETTER N u'o' # 0x96 -> LATIN SMALL LETTER O u'p' # 0x97 -> LATIN SMALL LETTER P u'q' # 0x98 -> LATIN SMALL LETTER Q u'r' # 0x99 -> LATIN SMALL LETTER R u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE u'\xb8' # 0x9D -> CEDILLA u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE u'\xa4' # 0x9F -> 
CURRENCY SIGN u'\xb5' # 0xA0 -> MICRO SIGN u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS u's' # 0xA2 -> LATIN SMALL LETTER S u't' # 0xA3 -> LATIN SMALL LETTER T u'u' # 0xA4 -> LATIN SMALL LETTER U u'v' # 0xA5 -> LATIN SMALL LETTER V u'w' # 0xA6 -> LATIN SMALL LETTER W u'x' # 0xA7 -> LATIN SMALL LETTER X u'y' # 0xA8 -> LATIN SMALL LETTER Y u'z' # 0xA9 -> LATIN SMALL LETTER Z u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK u'\xbf' # 0xAB -> INVERTED QUESTION MARK u']' # 0xAC -> RIGHT SQUARE BRACKET u'$' # 0xAD -> DOLLAR SIGN u'@' # 0xAE -> COMMERCIAL AT u'\xae' # 0xAF -> REGISTERED SIGN u'\xa2' # 0xB0 -> CENT SIGN u'\xa3' # 0xB1 -> POUND SIGN u'\xa5' # 0xB2 -> YEN SIGN u'\xb7' # 0xB3 -> MIDDLE DOT u'\xa9' # 0xB4 -> COPYRIGHT SIGN u'\xa7' # 0xB5 -> SECTION SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS u'\xac' # 0xBA -> NOT SIGN u'|' # 0xBB -> VERTICAL LINE u'\xaf' # 0xBC -> MACRON u'\xa8' # 0xBD -> DIAERESIS u'\xb4' # 0xBE -> ACUTE ACCENT u'\xd7' # 0xBF -> MULTIPLICATION SIGN u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA u'A' # 0xC1 -> LATIN CAPITAL LETTER A u'B' # 0xC2 -> LATIN CAPITAL LETTER B u'C' # 0xC3 -> LATIN CAPITAL LETTER C u'D' # 0xC4 -> LATIN CAPITAL LETTER D u'E' # 0xC5 -> LATIN CAPITAL LETTER E u'F' # 0xC6 -> LATIN CAPITAL LETTER F u'G' # 0xC7 -> LATIN CAPITAL LETTER G u'H' # 0xC8 -> LATIN CAPITAL LETTER H u'I' # 0xC9 -> LATIN CAPITAL LETTER I u'\xad' # 0xCA -> SOFT HYPHEN u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'~' # 0xCC -> TILDE u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE u'J' # 0xD1 -> LATIN CAPITAL LETTER J u'K' # 0xD2 -> LATIN CAPITAL LETTER K u'L' # 0xD3 -> LATIN CAPITAL LETTER L u'M' # 0xD4 -> LATIN CAPITAL LETTER M u'N' # 0xD5 -> LATIN 
CAPITAL LETTER N u'O' # 0xD6 -> LATIN CAPITAL LETTER O u'P' # 0xD7 -> LATIN CAPITAL LETTER P u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q u'R' # 0xD9 -> LATIN CAPITAL LETTER R u'\xb9' # 0xDA -> SUPERSCRIPT ONE u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\\' # 0xDC -> REVERSE SOLIDUS u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS u'\xf7' # 0xE1 -> DIVISION SIGN u'S' # 0xE2 -> LATIN CAPITAL LETTER S u'T' # 0xE3 -> LATIN CAPITAL LETTER T u'U' # 0xE4 -> LATIN CAPITAL LETTER U u'V' # 0xE5 -> LATIN CAPITAL LETTER V u'W' # 0xE6 -> LATIN CAPITAL LETTER W u'X' # 0xE7 -> LATIN CAPITAL LETTER X u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z u'\xb2' # 0xEA -> SUPERSCRIPT TWO u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'#' # 0xEC -> NUMBER SIGN u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE u'0' # 0xF0 -> DIGIT ZERO u'1' # 0xF1 -> DIGIT ONE u'2' # 0xF2 -> DIGIT TWO u'3' # 0xF3 -> DIGIT THREE u'4' # 0xF4 -> DIGIT FOUR u'5' # 0xF5 -> DIGIT FIVE u'6' # 0xF6 -> DIGIT SIX u'7' # 0xF7 -> DIGIT SEVEN u'8' # 0xF8 -> DIGIT EIGHT u'9' # 0xF9 -> DIGIT NINE u'\xb3' # 0xFA -> SUPERSCRIPT THREE u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'"' # 0xFC -> QUOTATION MARK u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE u'\x9f' # 0xFF -> CONTROL ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
gpl-3.0
ckoepp/TwitterSearch
tests/test_ts.py
7
11946
from TwitterSearch import * import unittest import httpretty class TwitterSearchTest(unittest.TestCase): def createTSO(self): """ Returns a default TwitterSearchOrder instance """ tso = TwitterSearchOrder() tso.set_keywords(['foo']) return tso def createTUO(self, username="foo"): """ Returns a default TwitterUserOrder instance """ return TwitterUserOrder(username) def createTS(self): """ Returns a default TwitterSearch instance """ return TwitterSearch('aaabbb','cccddd','111222','333444', verify=False) def apiAnsweringMachine(self, filename): """ Generates faked API responses by returing content of a given file """ f = open(filename, 'r') for line in f: yield line f.close() def setUp(self): """ Constructor """ self.auth_url = TwitterSearch._base_url + TwitterSearch._verify_url self.search_url = TwitterSearch._base_url + TwitterSearch._search_url self.lang_url = TwitterSearch._base_url + TwitterSearch._lang_url self.user_url = TwitterSearch._base_url + TwitterSearch._user_url ################ TESTS ######################### @httpretty.activate def test_TS_set_supported_languages(self): """ Tests TwitterSearch.set_supported_languages() """ httpretty.register_uri( httpretty.GET, self.lang_url, body=self.apiAnsweringMachine('tests/mock-data/lang.log'), streaming=True, status=200, content_type='text/json' ) ts = self.createTS() tso = self.createTSO() try: ts.set_supported_languages(tso) self.assertEqual(tso.iso_6391.sort(), [ 'fi', 'da', 'pl', 'hu', 'fa', 'he' ].sort()) except Exception as e: self.assertTrue(False, "An exception was raised: %s" % e) @httpretty.activate def test_TS_authenticate(self): """ Tests TwitterSearch.authenticate() for valid logins """ httpretty.register_uri( httpretty.GET, self.auth_url, body=self.apiAnsweringMachine('tests/mock-data/verify.log'), streaming=True, status=200, content_type='text/json' ) ts = self.createTS() try: ts.authenticate(True) self.assertTrue(True) except TwitterSearchException as e: self.assertTrue(False, "An exception was 
raised: %s" % e) @httpretty.activate def test_TS_authenticate_fail(self): """ Tests TwitterSearch.authenticate() for invalid logins """ httpretty.register_uri( httpretty.GET, self.auth_url, body=self.apiAnsweringMachine('tests/mock-data/verify-error.log'), streaming=True, status=401, content_type='text/json' ) ts = self.createTS() try: ts.authenticate(True) self.assertTrue(False, "Exception should be raised instead") except TwitterSearchException as e: self.assertEqual(e.code, 401, "Exception code should be 401 but is %i" % e.code) @httpretty.activate def test_TS_search_usertimeline_iterable(self): """ Tests TwitterSearch.search_tweets_iterable() and .get_statistics() by using TwitterUserOrder class """ httpretty.register_uri(httpretty.GET, self.user_url, responses=[ httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/user/0.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/user/1.log')), # add an empty page to mock the behavior of Twitter Timeline API httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/user/2.log')) ] ) expected_cnt = 390 # 200 in 0.log and 190 in 1.log (2.log is empty) pages = 3 # 0.log, 1.log and 2.log ts = self.createTS() tuo = self.createTUO() tweet_cnt = 0 for tweet in ts.search_tweets_iterable(tuo): tweet_cnt += 1 # test statistics stats = ts.get_statistics() self.assertEqual(stats[1], tweet_cnt, "Tweet counter is NOT working correctly (%i should be %i)" % (stats[1], tweet_cnt)) self.assertEqual(stats[0], pages, "Query counter is NOT working correctly (%i should be %i)" % (stats[0], pages)) @httpretty.activate def test_TS_search_tweets_iterable_callback(self): """ Tests TwitterSearch.search_tweets_iterable(callback) by using TwitterSearchOrder class """ import sys if sys.version_info[0] < 3: self.assertTrue(True) # Dummy test for 
py2 doesn't have Mock class return httpretty.register_uri(httpretty.GET, self.search_url, responses=[ httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/0.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/1.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/2.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/3.log')) ] ) pages = 4 tso = self.createTSO() tso.set_count(4) ts = self.createTS() from unittest.mock import Mock mock = Mock() for tweet in ts.search_tweets_iterable(tso, callback=mock): mock.assert_called_with(ts) times = len(mock.call_args_list) self.assertEqual(pages, times, "Callback function was NOT called 4 times but %i times" % times) @httpretty.activate def test_TS_search_tweets_iterable(self): """ Tests TwitterSearch.search_tweets_iterable() and .get_statistics() by using TwitterSearchOrder class """ httpretty.register_uri(httpretty.GET, self.search_url, responses=[ httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/0.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/1.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/2.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/3.log')) ] ) cnt = 4 pages = 4 # 4 pages with 4*4-1 tweets in total tso = self.createTSO() tso.set_count(cnt) ts = self.createTS() tweet_cnt = 0 for tweet in ts.search_tweets_iterable(tso): tweet_cnt += 1 self.assertEqual( (cnt*4-1), 
tweet_cnt, "Wrong amount of tweets") # test statistics stats = ts.get_statistics() self.assertEqual(stats[1], tweet_cnt, "Tweet counter is NOT working correctly (%i should be %i)" % (stats[1], tweet_cnt)) self.assertEqual(stats[0], pages, "Query counter is NOT working correctly (%i should be %i)" % (stats[0], pages)) @httpretty.activate def test_TS_empty_results(self): """ Tests TwitterSearch.search_tweets_iterable() with empty results """ httpretty.register_uri(httpretty.GET, self.search_url, responses=[ httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/empty.log')), ]) tso = self.createTSO() ts = self.createTS() for tweet in ts.search_tweets_iterable(tso): self.assertFalse(True, "There should be no tweets to be found") @httpretty.activate def test_TS_search_tweets(self): """ Tests TwitterSearch.search_tweets() """ httpretty.register_uri(httpretty.GET, self.search_url, responses=[ httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/0.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/1.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/2.log')), httpretty.Response(streaming=True, status=200, content_type='text/json', body=self.apiAnsweringMachine('tests/mock-data/search/3.log')) ] ) cnt = 4 tso = self.createTSO() tso.set_count(cnt) ts = self.createTS() todo = True next_max_id = 0 max_ids = [] while(todo): max_ids.append(next_max_id) response = ts.search_tweets(tso) todo = len(response['content']['statuses']) == cnt for tweet in response['content']['statuses']: tweet_id = tweet['id'] if (tweet_id < next_max_id) or (next_max_id == 0): next_max_id = tweet_id next_max_id -= 1 tso.set_max_id(next_max_id) self.assertEqual(max_ids, [0, 355715848851300353, 
355714667852726271, 355712782454358015], "Max ids NOT equal") def test_TS_string_output(self): """ Tests the string conversion of TwitterSearch """ access_token = "foobar" ts = TwitterSearch('aaabbb','cccddd', access_token, '333444', verify=False) self.assertEqual( "<%s %s>" % (ts.__class__.__name__, access_token), "%s" % ts) def test_TS_methods_exceptions(self): """ Tests various TwitterSearch methods with invalid inputs/states """ ts = self.createTS() with self.assertRaises(TwitterSearchException): ts.get_minimal_id() ts.send_search(101) ts.search_tweets("foobar") ts.get_metadata() ts.get_tweets() ts.get_amount_of_tweets() ts.set_supported_languages("joe.doe") def test_TS_minimal_id(self): """ Tests TwitterSearch.get_minimal_id method without request done """ ts = self.createTS() self.assertRaises(TwitterSearchException, ts.get_minimal_id, ) def test_TS_proxy(self): """ Tests the proxy functionality of TwitterSearch class """ # test constructor example_proxy = "some.proxy.com:1337" ts = TwitterSearch('aaabbb','cccddd','111222','333444', proxy=example_proxy, verify=False) self.assertEqual(ts.get_proxy(), example_proxy) # test manual setup example_proxy = "test.com:123" ts.set_proxy(example_proxy) self.assertEqual(ts.get_proxy(), example_proxy) try: ts.set_proxy(29.0) self.assertTrue(False, "Exception should be raised instead") except TwitterSearchException as e: self.assertEqual(e.code, 1009, "Exception code should be 401 but is %i" % e.code)
mit
Chris7/django-messages
django_messages/management.py
7
1297
from django.db.models import get_models, signals from django.conf import settings from django.utils.translation import ugettext_noop as _ if "notification" in settings.INSTALLED_APPS and getattr(settings, 'DJANGO_MESSAGES_NOTIFY', True): from notification import models as notification def create_notice_types(app, created_models, verbosity, **kwargs): notification.create_notice_type("messages_received", _("Message Received"), _("you have received a message"), default=2) notification.create_notice_type("messages_sent", _("Message Sent"), _("you have sent a message"), default=1) notification.create_notice_type("messages_replied", _("Message Replied"), _("you have replied to a message"), default=1) notification.create_notice_type("messages_reply_received", _("Reply Received"), _("you have received a reply to a message"), default=2) notification.create_notice_type("messages_deleted", _("Message Deleted"), _("you have deleted a message"), default=1) notification.create_notice_type("messages_recovered", _("Message Recovered"), _("you have undeleted a message"), default=1) signals.post_syncdb.connect(create_notice_types, sender=notification) else: print("Skipping creation of NoticeTypes as notification app not found")
bsd-3-clause
delighted/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
124
3032
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.checkout.commitinfo import CommitInfo from webkitpy.common.config.committers import CommitterList, Committer, Reviewer class CommitInfoTest(unittest.TestCase): def test_commit_info_creation(self): author = Committer("Author", "author@example.com") committer = Committer("Committer", "committer@example.com") reviewer = Reviewer("Reviewer", "reviewer@example.com") committer_list = CommitterList(committers=[author, committer], reviewers=[reviewer]) changelog_data = { "bug_id": 1234, "author_name": "Committer", "author_email": "author@example.com", "author": author, "reviewer_text": "Reviewer", "reviewer": reviewer, } commit = CommitInfo(123, "committer@example.com", changelog_data, committer_list) self.assertEqual(commit.revision(), 123) self.assertEqual(commit.bug_id(), 1234) self.assertEqual(commit.author_name(), "Committer") self.assertEqual(commit.author_email(), "author@example.com") self.assertEqual(commit.author(), author) self.assertEqual(commit.reviewer_text(), "Reviewer") self.assertEqual(commit.reviewer(), reviewer) self.assertEqual(commit.committer(), committer) self.assertEqual(commit.committer_email(), "committer@example.com") self.assertEqual(commit.responsible_parties(), set([author, committer, reviewer]))
bsd-3-clause
ibjohansen/acando-react-boilerplate-extended
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
1869
1247
# Copyright 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A clone of the default copy.deepcopy that doesn't handle cyclic structures or complex types except for dicts and lists. This is because gyp copies so large structure that small copy overhead ends up taking seconds in a project the size of Chromium.""" class Error(Exception): pass __all__ = ["Error", "deepcopy"] def deepcopy(x): """Deep copy operation on gyp objects such as strings, ints, dicts and lists. More than twice as fast as copy.deepcopy but much less generic.""" try: return _deepcopy_dispatch[type(x)](x) except KeyError: raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy ' + 'or expand simple_copy support.' % type(x)) _deepcopy_dispatch = d = {} def _deepcopy_atomic(x): return x for x in (type(None), int, long, float, bool, str, unicode, type): d[x] = _deepcopy_atomic def _deepcopy_list(x): return [deepcopy(a) for a in x] d[list] = _deepcopy_list def _deepcopy_dict(x): y = {} for key, value in x.iteritems(): y[deepcopy(key)] = deepcopy(value) return y d[dict] = _deepcopy_dict del d
mit
moto-timo/ironpython3
Src/StdLib/Lib/asyncio/windows_utils.py
37
6844
""" Various Windows specific bits and pieces """ import sys if sys.platform != 'win32': # pragma: no cover raise ImportError('win32 only') import _winapi import itertools import msvcrt import os import socket import subprocess import tempfile import warnings __all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle'] # Constants/globals BUFSIZE = 8192 PIPE = subprocess.PIPE STDOUT = subprocess.STDOUT _mmap_counter = itertools.count() if hasattr(socket, 'socketpair'): # Since Python 3.5, socket.socketpair() is now also available on Windows socketpair = socket.socketpair else: # Replacement for socket.socketpair() def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): """A socket pair usable as a self-pipe, for Windows. Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. """ if family == socket.AF_INET: host = '127.0.0.1' elif family == socket.AF_INET6: host = '::1' else: raise ValueError("Only AF_INET and AF_INET6 socket address " "families are supported") if type != socket.SOCK_STREAM: raise ValueError("Only SOCK_STREAM socket type is supported") if proto != 0: raise ValueError("Only protocol zero is supported") # We create a connected TCP socket. Note the trick with setblocking(0) # that prevents us from having to create a thread. 
lsock = socket.socket(family, type, proto) try: lsock.bind((host, 0)) lsock.listen(1) # On IPv6, ignore flow_info and scope_id addr, port = lsock.getsockname()[:2] csock = socket.socket(family, type, proto) try: csock.setblocking(False) try: csock.connect((addr, port)) except (BlockingIOError, InterruptedError): pass csock.setblocking(True) ssock, _ = lsock.accept() except: csock.close() raise finally: lsock.close() return (ssock, csock) # Replacement for os.pipe() using handles instead of fds def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE): """Like os.pipe() but with overlapped support and using handles not fds.""" address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' % (os.getpid(), next(_mmap_counter))) if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = bufsize, bufsize else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, bufsize openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE if overlapped[0]: openmode |= _winapi.FILE_FLAG_OVERLAPPED if overlapped[1]: flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED else: flags_and_attribs = 0 h1 = h2 = None try: h1 = _winapi.CreateNamedPipe( address, openmode, _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, flags_and_attribs, _winapi.NULL) ov = _winapi.ConnectNamedPipe(h1, overlapped=True) ov.GetOverlappedResult(True) return h1, h2 except: if h1 is not None: _winapi.CloseHandle(h1) if h2 is not None: _winapi.CloseHandle(h2) raise # Wrapper for a pipe handle class PipeHandle: """Wrapper for an overlapped pipe handle which is vaguely file-object like. The IOCP event loop can use these instead of socket objects. 
""" def __init__(self, handle): self._handle = handle def __repr__(self): if self._handle is not None: handle = 'handle=%r' % self._handle else: handle = 'closed' return '<%s %s>' % (self.__class__.__name__, handle) @property def handle(self): return self._handle def fileno(self): if self._handle is None: raise ValueError("I/O operatioon on closed pipe") return self._handle def close(self, *, CloseHandle=_winapi.CloseHandle): if self._handle is not None: CloseHandle(self._handle) self._handle = None def __del__(self): if self._handle is not None: warnings.warn("unclosed %r" % self, ResourceWarning) self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() # Replacement for subprocess.Popen using overlapped pipe handles class Popen(subprocess.Popen): """Replacement for subprocess.Popen using overlapped pipe handles. The stdin, stdout, stderr are None or instances of PipeHandle. """ def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): assert not kwds.get('universal_newlines') assert kwds.get('bufsize', 0) == 0 stdin_rfd = stdout_wfd = stderr_wfd = None stdin_wh = stdout_rh = stderr_rh = None if stdin == PIPE: stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) else: stdin_rfd = stdin if stdout == PIPE: stdout_rh, stdout_wh = pipe(overlapped=(True, False)) stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) else: stdout_wfd = stdout if stderr == PIPE: stderr_rh, stderr_wh = pipe(overlapped=(True, False)) stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) elif stderr == STDOUT: stderr_wfd = stdout_wfd else: stderr_wfd = stderr try: super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, stderr=stderr_wfd, **kwds) except: for h in (stdin_wh, stdout_rh, stderr_rh): if h is not None: _winapi.CloseHandle(h) raise else: if stdin_wh is not None: self.stdin = PipeHandle(stdin_wh) if stdout_rh is not None: self.stdout = PipeHandle(stdout_rh) if stderr_rh is not 
None: self.stderr = PipeHandle(stderr_rh) finally: if stdin == PIPE: os.close(stdin_rfd) if stdout == PIPE: os.close(stdout_wfd) if stderr == PIPE: os.close(stderr_wfd)
apache-2.0
t-tran/libcloud
libcloud/container/utils/docker.py
12
6227
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement from base64 import b64encode from libcloud.common.base import Connection, JsonResponse from libcloud.container.base import ContainerImage __all__ = [ 'RegistryClient', 'HubClient' ] class DockerHubConnection(Connection): responseCls = JsonResponse def __init__(self, host, username=None, password=None, secure=True, port=None, url=None, timeout=None, proxy_url=None, backoff=None, retry_delay=None): super(DockerHubConnection, self).__init__(secure=secure, host=host, port=port, url=url, timeout=timeout, proxy_url=proxy_url, backoff=backoff, retry_delay=retry_delay) self.username = username self.password = password def add_default_headers(self, headers): headers['Content-Type'] = 'application/json' if self.username is not None: authstr = 'Basic ' + str( b64encode( ('%s:%s' % (self.username, self.password)) .encode('latin1')) .strip() ) headers['Authorization'] = authstr return headers class RegistryClient(object): """ A client for the Docker v2 registry API """ connectionCls = DockerHubConnection def __init__(self, host, username=None, password=None, **kwargs): """ Construct a Docker hub client :param username: (optional) Your Hub account username :type username: ``str`` 
:param password: (optional) Your hub account password :type password: ``str`` """ self.connection = self.connectionCls(host, username, password, **kwargs) def list_images(self, repository_name, namespace='library', max_count=100): """ List the tags (versions) in a repository :param repository_name: The name of the repository e.g. 'ubuntu' :type repository_name: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :param max_count: The maximum number of records to return :type max_count: ``int`` :return: A list of images :rtype: ``list`` of :class:`libcloud.container.base.ContainerImage` """ path = '/v2/repositories/%s/%s/tags/?page=1&page_size=%s' \ % (namespace, repository_name, max_count) response = self.connection.request(path) images = [] for image in response.object['results']: images.append(self._to_image(repository_name, image)) return images def get_repository(self, repository_name, namespace='library'): """ Get the information about a specific repository :param repository_name: The name of the repository e.g. 'ubuntu' :type repository_name: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :return: The details of the repository :rtype: ``object`` """ path = '/v2/repositories/%s/%s/' % (namespace, repository_name) response = self.connection.request(path) return response.object def get_image(self, repository_name, tag='latest', namespace='library'): """ Get an image from a repository with a specific tag :param repository_name: The name of the repository, e.g. 
ubuntu :type repository_name: ``str`` :param tag: (optional) The image tag (defaults to latest) :type tag: ``str`` :param namespace: (optional) The docker namespace :type namespace: ``str`` :return: A container image :rtype: :class:`libcloud.container.base.ContainerImage` """ path = '/v2/repositories/%s/%s/tags/%s/' \ % (namespace, repository_name, tag) response = self.connection.request(path) return self._to_image(repository_name, response.object) def _to_image(self, repository_name, obj): path = '%s/%s:%s' % (self.connection.host, repository_name, obj['name']) return ContainerImage( id=obj['id'], path=path, name=path, version=obj['name'], extra={ 'full_size': obj['full_size'] }, driver=None ) class HubClient(RegistryClient): """ A client for the Docker Hub API The hub is based on the v2 registry API """ host = 'registry.hub.docker.com' def __init__(self, username=None, password=None, **kwargs): """ Construct a Docker hub client :param username: (optional) Your Hub account username :type username: ``str`` :param password: (optional) Your hub account password :type password: ``str`` """ super(HubClient, self).__init__(self.host, username, password, **kwargs)
apache-2.0
SurfasJones/icecream-info
icecream/lib/python2.7/site-packages/sphinx/errors.py
16
1711
# -*- coding: utf-8 -*- """ sphinx.errors ~~~~~~~~~~~~~ Contains SphinxError and a few subclasses (in an extra module to avoid circular import problems). :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ class SphinxError(Exception): """ Base class for Sphinx errors that are shown to the user in a nicer way than normal exceptions. """ category = 'Sphinx error' class SphinxWarning(SphinxError): """Raised for warnings if warnings are treated as errors.""" category = 'Warning, treated as error' class ExtensionError(SphinxError): """Raised if something's wrong with the configuration.""" category = 'Extension error' def __init__(self, message, orig_exc=None): SphinxError.__init__(self, message) self.orig_exc = orig_exc def __repr__(self): if self.orig_exc: return '%s(%r, %r)' % (self.__class__.__name__, self.message, self.orig_exc) return '%s(%r)' % (self.__class__.__name__, self.message) def __str__(self): parent_str = SphinxError.__str__(self) if self.orig_exc: return '%s (exception: %s)' % (parent_str, self.orig_exc) return parent_str class ConfigError(SphinxError): category = 'Configuration error' class ThemeError(SphinxError): category = 'Theme error' class VersionRequirementError(SphinxError): category = 'Sphinx version error' class PycodeError(Exception): def __str__(self): res = self.args[0] if len(self.args) > 1: res += ' (exception was: %r)' % self.args[1] return res
mit
cfogelberg/postgres-fiddle
src/scripts/install.py
1
10130
''' Installs the application to a server Installs the application to a server, using the current location of the extracted files as their final location (so extract the files from any install archive to where they should be installed first). - Guides user through configuration file changes - Sets up and configures the database and database schema - Executes further database-level tasks like index creation (optional) - Symlinks the desired application directory (e.g. /opt/postgres-fiddle/app) to the install location Assumptions: - That the database engine is running and listening ''' #!/usr/bin/python import os as os import uuid as uuid import lib.general as general import configure as configure CONST_DB_SETUP_COMMANDS_TEMPLATE = """ sudo -u postgres psql -c "CREATE USER {db.user} WITH PASSWORD '{db.pw}';" sudo -u postgres psql -c "CREATE DATABASE {db.name} OWNER {db.user};" sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE {db.name} TO {db.user};" sudo -u postgres psql -c "CREATE SCHEMA {db.schema} AUTHORIZATION {db.user};" {db.name} sudo -u postgres psql -c 'CREATE EXTENSION "uuid-ossp" SCHEMA {db.schema};' {db.name} """ CONST_INITIALISE_PR = """ 'use strict'; var pr = require('app/util/pr'); var q = require('q'); pr.sq.sync({ force: true }) .then(function() { var hard_coded_tag_promises = q.all([ pr.pr.entry.tag.create({value: 'foo'}), pr.pr.entry.tag.create({value: 'bar'}), pr.pr.entry.tag.create({value: 'baz'}) ]); var hard_coded_entry_promises = q.all([ pr.pr.entry.entry.create({ body: 'This is entry 0. Here is some text.', date: new Date(2015, 2, 10) }), pr.pr.entry.entry.create({ body: 'This is entry one. Here is some more text.', date: new Date(2015, 2, 10) }), pr.pr.entry.entry.create({ body: 'This is entry tertius III. 
Here is interesting text.', date: new Date(2015, 2, 12) }), pr.pr.entry.entry.create({ body: 'this is entry iv i dont know punctuation', date: new Date(2015, 2, 11) }), pr.pr.entry.entry.create({ body: 'This is entry si4 with id 5 and a fullstop.', date: new Date(2015, 2, 13) }), pr.pr.entry.entry.create({ body: 'This is entry hex. Should I be a magical curse?', date: new Date(2015, 2, 14) }) ]); return q.all([hard_coded_tag_promises, hard_coded_entry_promises]); }) .spread(function(hard_coded_tags, hard_coded_entries) { return q.all([ hard_coded_entries[0].setTags([hard_coded_tags[0], hard_coded_tags[1]]), hard_coded_entries[1].setTags([hard_coded_tags[2]]), hard_coded_entries[2].setTags([hard_coded_tags[1], hard_coded_tags[2]]), hard_coded_entries[3].setTags([hard_coded_tags[0]]), hard_coded_entries[4].setTags([hard_coded_tags[1]]), hard_coded_entries[5].setTags([hard_coded_tags[0], hard_coded_tags[1], hard_coded_tags[2]]) ]); }) .then(function() { pr.sq.close(); }) .done(); """ CONST_DB_ADDITIONAL_COMMANDS = """ sudo -u postgres psql -c "CREATE INDEX entry_tag_entry_ndx ON {db.schema}.entry_tag (entry_id);" {db.name} sudo -u postgres psql -c "CREATE INDEX entry_tag_tag_ndx ON {db.schema}.entry_tag (tag_id);" {db.name} """ def read_db_configuration(install_dir_path): db_config_path = 'server/app/config/database.js' with open(os.path.join(install_dir_path, db_config_path), 'r') as db_config_file: curr_file_as_string = db_config_file.read() user = general.value_from_file_string('user: \'([a-zA-Z_]*)\'', curr_file_as_string, db_config_path, 'User') pw = general.value_from_file_string('password: \'([a-zA-Z_]*)\'', curr_file_as_string, db_config_path, 'Password') name = general.value_from_file_string('name: \'([a-zA-Z_]*)\'', curr_file_as_string, db_config_path, 'Name') schema = general.value_from_file_string('schema: \'([a-zA-Z_]*)\'', curr_file_as_string, db_config_path, 'Schema') return (user, pw, name, schema) def execute_shell_commands(commands_list): for command 
in commands_list: print('Executing: ' + command) os.system(command) def setup_database(install_dir_path): print('******************************************************************') print(' SETTING UP DATABASE') print('******************************************************************') print('NB: Commands may create spurious stderr output if postgres user cannot read install dir') # Read DB configuration from server/app/config/database.js (user, pw, name, schema) = read_db_configuration(install_dir_path) # Generate and write DB creation shell script db_setup_commands = CONST_DB_SETUP_COMMANDS_TEMPLATE \ .replace('{db.user}', user) \ .replace('{db.pw}', pw) \ .replace('{db.name}', name) \ .replace('{db.schema}', schema) \ .strip() \ .split('\n') # Execute the DB setup commands execute_shell_commands(db_setup_commands) print('******************************************************************') print('') print('') print('') def initialise_schema(install_dir_path): print('******************************************************************') print(' INITIALISING DB SCHEMA') print('******************************************************************') # Write initialise PR script to a temp file in server application directory temp_nodefile_file_path = 'TEMP-' + str(uuid.uuid4()) + '-initialise-pr.js' print('Generating temporary node script to: ' + temp_nodefile_file_path) temp_nodefile_full_path = os.path.join( install_dir_path, 'server', 'app', temp_nodefile_file_path ) with open(temp_nodefile_full_path, 'w') as temp_nodefile_file: temp_nodefile_file.write(CONST_INITIALISE_PR) # Run the file as a node server script from a directory with a logs directory # cwd = os.getcwd() print('Changing working directory to ' + install_dir_path) os.chdir(install_dir_path) initialise_pr_command = 'node ' + os.path.join('server', 'app', temp_nodefile_file_path) print('Executing : ' + initialise_pr_command) os.system(initialise_pr_command) print('Restoring working directory to ' + cwd) 
os.chdir(cwd) # Delete the temporarily generated file print('Deleting ' + temp_nodefile_full_path) os.unlink(temp_nodefile_full_path) print('******************************************************************') print('') print('') print('') def execute_additional_db_tasks(install_dir_path): print('******************************************************************') print(' EXECUTING ADDITIONAL DB TASKS') print('******************************************************************') (user, pw, name, schema) = read_db_configuration(install_dir_path) db_additional_commands = CONST_DB_ADDITIONAL_COMMANDS \ .replace('{db.user}', user) \ .replace('{db.pw}', pw) \ .replace('{db.name}', name) \ .replace('{db.schema}', schema) \ .strip() \ .split('\n') execute_shell_commands(db_additional_commands) print('******************************************************************') print('') print('') print('') def create_app_symlink(install_dir_path, app_symlink_path): print('******************************************************************') print(' CREATING APP SYMLINK') print('******************************************************************') if not os.path.exists(os.path.dirname(app_symlink_path)): os.makedirs(os.path.dirname(app_symlink_path)) os.symlink(install_dir_path, app_symlink_path) print('******************************************************************') print('') print('') print('') def install_app(install_dir_path, app_symlink_path): ''' Installs the application by executing the following process: - (0) Check install_dir_path exists and app_symlink_path doesn't - (1) Guide user through configuration file changes - (2) Initialise the DB, creating users and schema - (3) Set up the database schema - (4) Executes additional database level tasks (like index creation for improved performance) - (5) Symlinks the install directory to the target application directory ''' # (0) if not os.path.exists(install_dir_path): raise Error( 'Error:\n' + 'Install directory path does not 
exist: "' + install_dir_path + '"' ) if os.path.lexists(app_symlink_path): raise Error( 'Error:\n' + 'App symlink already exists: "' + app_symlink_path + '"' ) # (1) configure.configure_app( current_value_install_dir=install_dir_path, output_value_install_dir=install_dir_path ) # (2) setup_database( install_dir_path=install_dir_path ) # (3) initialise_schema( install_dir_path=install_dir_path ) # (4) execute_additional_db_tasks( install_dir_path=install_dir_path ) # (5) create_app_symlink( install_dir_path=install_dir_path, app_symlink_path=app_symlink_path ) if __name__ == '__main__': print('NB: The application install directory is probably the parent of this scripts directory') install_dir_path = raw_input('Enter the application install directory path: ').strip() app_symlink_path = raw_input('Enter the application symlink path: ').strip() print('\nYou have entered:') print('- application install directory path: ' + install_dir_path) print('- application symlink path: ' + app_symlink_path) if general.prompt_for_confirm('Is this correct?'): print('') install_app( install_dir_path=install_dir_path, app_symlink_path=app_symlink_path ) print('') print('') print('') print('******************************************************************') print('******************************************************************') print('******************************************************************') print(' INSTALL COMPLETE') print('******************************************************************') print('******************************************************************') print('******************************************************************') else: print('Aborting')
mit
her0e1c1/click
click/types.py
29
17490
import os import sys import stat from ._compat import open_stream, text_type, filename_to_ui, \ get_filesystem_encoding, get_streerror from .exceptions import BadParameter from .utils import safecall, LazyFile class ParamType(object): """Helper for converting values through types. The following is necessary for a valid type: * it needs a name * it needs to pass through None unchanged * it needs to convert from a string * it needs to convert its result type through unchanged (eg: needs to be idempotent) * it needs to be able to deal with param and context being `None`. This can be the case when the object is used with prompt inputs. """ is_composite = False #: the descriptive name of this type name = None #: if a list of this type is expected and the value is pulled from a #: string environment variable, this is what splits it up. `None` #: means any whitespace. For all parameters the general rule is that #: whitespace splits them up. The exception are paths and files which #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on #: Windows). envvar_list_splitter = None def __call__(self, value, param=None, ctx=None): if value is not None: return self.convert(value, param, ctx) def get_metavar(self, param): """Returns the metavar default for this param if it provides one.""" def get_missing_message(self, param): """Optionally might return extra information about a missing parameter. .. versionadded:: 2.0 """ def convert(self, value, param, ctx): """Converts the value. This is not invoked for values that are `None` (the missing value). """ return value def split_envvar_value(self, rv): """Given a value from an environment variable this splits it up into small chunks depending on the defined envvar list splitter. If the splitter is set to `None`, which means that whitespace splits, then leading and trailing whitespace is ignored. Otherwise, leading and trailing splitters usually lead to empty items being included. 
""" return (rv or '').split(self.envvar_list_splitter) def fail(self, message, param=None, ctx=None): """Helper method to fail with an invalid value message.""" raise BadParameter(message, ctx=ctx, param=param) class CompositeParamType(ParamType): is_composite = True @property def arity(self): raise NotImplementedError() class FuncParamType(ParamType): def __init__(self, func): self.name = func.__name__ self.func = func def convert(self, value, param, ctx): try: return self.func(value) except ValueError: try: value = text_type(value) except UnicodeError: value = str(value).decode('utf-8', 'replace') self.fail(value, param, ctx) class UnprocessedParamType(ParamType): name = 'text' def convert(self, value, param, ctx): return value def __repr__(self): return 'UNPROCESSED' class StringParamType(ParamType): name = 'text' def convert(self, value, param, ctx): if isinstance(value, bytes): try: enc = getattr(sys.stdin, 'encoding', None) if enc is not None: value = value.decode(enc) except UnicodeError: try: value = value.decode(get_filesystem_encoding()) except UnicodeError: value = value.decode('utf-8', 'replace') return value return value def __repr__(self): return 'STRING' class Choice(ParamType): """The choice type allows a value to be checked against a fixed set of supported values. All of these values have to be strings. See :ref:`choice-opts` for an example. """ name = 'choice' def __init__(self, choices): self.choices = choices def get_metavar(self, param): return '[%s]' % '|'.join(self.choices) def get_missing_message(self, param): return 'Choose from %s.' % ', '.join(self.choices) def convert(self, value, param, ctx): # Exact match if value in self.choices: return value # Match through normalization if ctx is not None and \ ctx.token_normalize_func is not None: value = ctx.token_normalize_func(value) for choice in self.choices: if ctx.token_normalize_func(choice) == value: return choice self.fail('invalid choice: %s. 
(choose from %s)' % (value, ', '.join(self.choices)), param, ctx) def __repr__(self): return 'Choice(%r)' % list(self.choices) class IntParamType(ParamType): name = 'integer' def convert(self, value, param, ctx): try: return int(value) except ValueError: self.fail('%s is not a valid integer' % value, param, ctx) def __repr__(self): return 'INT' class IntRange(IntParamType): """A parameter that works similar to :data:`click.INT` but restricts the value to fit into a range. The default behavior is to fail if the value falls outside the range, but it can also be silently clamped between the two edges. See :ref:`ranges` for an example. """ name = 'integer range' def __init__(self, min=None, max=None, clamp=False): self.min = min self.max = max self.clamp = clamp def convert(self, value, param, ctx): rv = IntParamType.convert(self, value, param, ctx) if self.clamp: if self.min is not None and rv < self.min: return self.min if self.max is not None and rv > self.max: return self.max if self.min is not None and rv < self.min or \ self.max is not None and rv > self.max: if self.min is None: self.fail('%s is bigger than the maximum valid value ' '%s.' % (rv, self.max), param, ctx) elif self.max is None: self.fail('%s is smaller than the minimum valid value ' '%s.' % (rv, self.min), param, ctx) else: self.fail('%s is not in the valid range of %s to %s.' 
% (rv, self.min, self.max), param, ctx) return rv def __repr__(self): return 'IntRange(%r, %r)' % (self.min, self.max) class BoolParamType(ParamType): name = 'boolean' def convert(self, value, param, ctx): if isinstance(value, bool): return bool(value) value = value.lower() if value in ('true', '1', 'yes', 'y'): return True elif value in ('false', '0', 'no', 'n'): return False self.fail('%s is not a valid boolean' % value, param, ctx) def __repr__(self): return 'BOOL' class FloatParamType(ParamType): name = 'float' def convert(self, value, param, ctx): try: return float(value) except ValueError: self.fail('%s is not a valid floating point value' % value, param, ctx) def __repr__(self): return 'FLOAT' class UUIDParameterType(ParamType): name = 'uuid' def convert(self, value, param, ctx): import uuid try: return uuid.UUID(value) except ValueError: self.fail('%s is not a valid UUID value' % value, param, ctx) def __repr__(self): return 'UUID' class File(ParamType): """Declares a parameter to be a file for reading or writing. The file is automatically closed once the context tears down (after the command finished working). Files can be opened for reading or writing. The special value ``-`` indicates stdin or stdout depending on the mode. By default, the file is opened for reading text data, but it can also be opened in binary mode or for writing. The encoding parameter can be used to force a specific encoding. The `lazy` flag controls if the file should be opened immediately or upon first IO. The default is to be non lazy for standard input and output streams as well as files opened for reading, lazy otherwise. Starting with Click 2.0, files can also be opened atomically in which case all writes go into a separate file in the same folder and upon completion the file will be moved over to the original location. This is useful if a file regularly read by other users is modified. See :ref:`file-args` for more information. 
""" name = 'filename' envvar_list_splitter = os.path.pathsep def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, atomic=False): self.mode = mode self.encoding = encoding self.errors = errors self.lazy = lazy self.atomic = atomic def resolve_lazy_flag(self, value): if self.lazy is not None: return self.lazy if value == '-': return False elif 'w' in self.mode: return True return False def convert(self, value, param, ctx): try: if hasattr(value, 'read') or hasattr(value, 'write'): return value lazy = self.resolve_lazy_flag(value) if lazy: f = LazyFile(value, self.mode, self.encoding, self.errors, atomic=self.atomic) if ctx is not None: ctx.call_on_close(f.close_intelligently) return f f, should_close = open_stream(value, self.mode, self.encoding, self.errors, atomic=self.atomic) # If a context is provided, we automatically close the file # at the end of the context execution (or flush out). If a # context does not exist, it's the caller's responsibility to # properly close the file. This for instance happens when the # type is used with prompts. if ctx is not None: if should_close: ctx.call_on_close(safecall(f.close)) else: ctx.call_on_close(safecall(f.flush)) return f except (IOError, OSError) as e: self.fail('Could not open file: %s: %s' % ( filename_to_ui(value), get_streerror(e), ), param, ctx) class Path(ParamType): """The path type is similar to the :class:`File` type but it performs different checks. First of all, instead of returning an open file handle it returns just the filename. Secondly, it can perform various basic checks about what the file or directory should be. :param exists: if set to true, the file or directory needs to exist for this value to be valid. If this is not required and a file does indeed not exist, then all further checks are silently skipped. :param file_okay: controls if a file is a possible value. :param dir_okay: controls if a directory is a possible value. :param writable: if true, a writable check is performed. 
:param readable: if true, a readable check is performed. :param resolve_path: if this is true, then the path is fully resolved before the value is passed onwards. This means that it's absolute and symlinks are resolved. """ envvar_list_splitter = os.path.pathsep def __init__(self, exists=False, file_okay=True, dir_okay=True, writable=False, readable=True, resolve_path=False): self.exists = exists self.file_okay = file_okay self.dir_okay = dir_okay self.writable = writable self.readable = readable self.resolve_path = resolve_path if self.file_okay and not self.dir_okay: self.name = 'file' self.path_type = 'File' if self.dir_okay and not self.file_okay: self.name = 'directory' self.path_type = 'Directory' else: self.name = 'path' self.path_type = 'Path' def convert(self, value, param, ctx): rv = value if self.resolve_path: rv = os.path.realpath(rv) try: st = os.stat(rv) except OSError: if not self.exists: return rv self.fail('%s "%s" does not exist.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if not self.file_okay and stat.S_ISREG(st.st_mode): self.fail('%s "%s" is a file.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if not self.dir_okay and stat.S_ISDIR(st.st_mode): self.fail('%s "%s" is a directory.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if self.writable and not os.access(value, os.W_OK): self.fail('%s "%s" is not writable.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if self.readable and not os.access(value, os.R_OK): self.fail('%s "%s" is not readable.' % ( self.path_type, filename_to_ui(value) ), param, ctx) return rv class Tuple(CompositeParamType): """The default behavior of Click is to apply a type on a value directly. This works well in most cases, except for when `nargs` is set to a fixed count and different types should be used for different items. In this case the :class:`Tuple` type can be used. This type can only be used if `nargs` is set to a fixed number. 
For more information see :ref:`tuple-type`. This can be selected by using a Python tuple literal as a type. :param types: a list of types that should be used for the tuple items. """ def __init__(self, types): self.types = [convert_type(ty) for ty in types] @property def name(self): return "<" + " ".join(ty.name for ty in self.types) + ">" @property def arity(self): return len(self.types) def convert(self, value, param, ctx): if len(value) != len(self.types): raise TypeError('It would appear that nargs is set to conflict ' 'with the composite type arity.') return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) def convert_type(ty, default=None): """Converts a callable or python ty into the most appropriate param ty. """ guessed_type = False if ty is None and default is not None: if isinstance(default, tuple): ty = tuple(map(type, default)) else: ty = type(default) guessed_type = True if isinstance(ty, tuple): return Tuple(ty) if isinstance(ty, ParamType): return ty if ty is text_type or ty is str or ty is None: return STRING if ty is int: return INT # Booleans are only okay if not guessed. This is done because for # flags the default value is actually a bit of a lie in that it # indicates which of the flags is the one we want. See get_default() # for more information. if ty is bool and not guessed_type: return BOOL if ty is float: return FLOAT if guessed_type: return STRING # Catch a common mistake if __debug__: try: if issubclass(ty, ParamType): raise AssertionError('Attempted to use an uninstantiated ' 'parameter type (%s).' % ty) except TypeError: pass return FuncParamType(ty) #: A dummy parameter type that just does nothing. From a user's #: perspective this appears to just be the same as `STRING` but internally #: no string conversion takes place. This is necessary to achieve the #: same bytes/unicode behavior on Python 2/3 in situations where you want #: to not convert argument types. 
This is usually useful when working #: with file paths as they can appear in bytes and unicode. #: #: For path related uses the :class:`Path` type is a better choice but #: there are situations where an unprocessed type is useful which is why #: it is is provided. #: #: .. versionadded:: 4.0 UNPROCESSED = UnprocessedParamType() #: A unicode string parameter type which is the implicit default. This #: can also be selected by using ``str`` as type. STRING = StringParamType() #: An integer parameter. This can also be selected by using ``int`` as #: type. INT = IntParamType() #: A floating point value parameter. This can also be selected by using #: ``float`` as type. FLOAT = FloatParamType() #: A boolean parameter. This is the default for boolean flags. This can #: also be selected by using ``bool`` as a type. BOOL = BoolParamType() #: A UUID parameter. UUID = UUIDParameterType()
bsd-3-clause
bobobox/ansible
lib/ansible/modules/network/netvisor/pn_ospf.py
18
8828
#!/usr/bin/python """ PN-CLI vrouter-ospf-add/remove """ # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: pn_ospf author: "Pluribus Networks (@amitsi)" version_added: "2.2" version: 1.0 short_description: CLI command to add/remove ospf protocol to a vRouter. description: - Execute vrouter-ospf-add, vrouter-ospf-remove command. - This command adds/removes Open Shortest Path First(OSPF) routing protocol to a virtual router(vRouter) service. options: pn_cliusername: description: - Provide login username if user is not root. required: False pn_clipassword: description: - Provide login password if user is not root. required: False pn_cliswitch: description: - Target switch to run the CLI on. required: False state: description: - Assert the state of the ospf. Use 'present' to add ospf and 'absent' to remove ospf. required: True default: present choices: ['present', 'absent'] pn_vrouter_name: description: - Specify the name of the vRouter. required: True pn_network_ip: description: - Specify the network IP (IPv4 or IPv6) address. required: True pn_ospf_area: description: - Stub area number for the configuration. Required for vrouter-ospf-add. 
""" EXAMPLES = """ - name: "Add OSPF to vrouter" pn_ospf: state: present pn_vrouter_name: name-string pn_network_ip: 192.168.11.2/24 pn_ospf_area: 1.0.0.0 - name: "Remove OSPF from vrouter" pn_ospf: state: absent pn_vrouter_name: name-string """ RETURN = """ command: description: The CLI command run on the target node(s). stdout: description: The set of responses from the ospf command. returned: always type: list stderr: description: The set of error responses from the ospf command. returned: on error type: list changed: description: Indicates whether the CLI caused changes on the target. returned: always type: bool """ import shlex VROUTER_EXISTS = None NETWORK_EXISTS = None def pn_cli(module): """ This method is to generate the cli portion to launch the Netvisor cli. It parses the username, password, switch parameters from module. :param module: The Ansible module to fetch username, password and switch :return: returns the cli string for further processing """ username = module.params['pn_cliusername'] password = module.params['pn_clipassword'] cliswitch = module.params['pn_cliswitch'] if username and password: cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password) else: cli = '/usr/bin/cli --quiet ' if cliswitch == 'local': cli += ' switch-local ' else: cli += ' switch ' + cliswitch return cli def check_cli(module, cli): """ This method checks if vRouter exists on the target node. This method also checks for idempotency using the vrouter-ospf-show command. If the given vRouter exists, return VROUTER_EXISTS as True else False. If an OSPF network with the given ip exists on the given vRouter, return NETWORK_EXISTS as True else False. 
:param module: The Ansible module to fetch input parameters :param cli: The CLI string :return Global Booleans: VROUTER_EXISTS, NETWORK_EXISTS """ vrouter_name = module.params['pn_vrouter_name'] network_ip = module.params['pn_network_ip'] # Global flags global VROUTER_EXISTS, NETWORK_EXISTS # Check for vRouter check_vrouter = cli + ' vrouter-show format name no-show-headers ' check_vrouter = shlex.split(check_vrouter) out = module.run_command(check_vrouter)[1] out = out.split() if vrouter_name in out: VROUTER_EXISTS = True else: VROUTER_EXISTS = False # Check for OSPF networks show = cli + ' vrouter-ospf-show vrouter-name %s ' % vrouter_name show += 'format network no-show-headers' show = shlex.split(show) out = module.run_command(show)[1] out = out.split() if network_ip in out: NETWORK_EXISTS = True else: NETWORK_EXISTS = False def run_cli(module, cli): """ This method executes the cli command on the target node(s) and returns the output. The module then exits based on the output. :param cli: the complete cli string to be executed on the target node(s). :param module: The Ansible module to fetch command """ cliswitch = module.params['pn_cliswitch'] state = module.params['state'] command = get_command_from_state(state) cmd = shlex.split(cli) result, out, err = module.run_command(cmd) print_cli = cli.split(cliswitch)[1] # Response in JSON format if result != 0: module.exit_json( command=print_cli, stderr=err.strip(), msg="%s operation failed" % command, changed=False ) if out: module.exit_json( command=print_cli, stdout=out.strip(), msg="%s operation completed" % command, changed=True ) else: module.exit_json( command=print_cli, msg="%s operation completed" % command, changed=True ) def get_command_from_state(state): """ This method gets appropriate command name for the state specified. It returns the command name for the specified state. :param state: The state for which the respective command name is required. 
""" command = None if state == 'present': command = 'vrouter-ospf-add' if state == 'absent': command = 'vrouter-ospf-remove' return command def main(): """ This section is for arguments parsing """ module = AnsibleModule( argument_spec=dict( pn_cliusername=dict(required=False, type='str'), pn_clipassword=dict(required=False, type='str', no_log=True), pn_cliswitch=dict(required=False, type='str', default='local'), state=dict(type='str', default='present', choices=['present', 'absent']), pn_vrouter_name=dict(required=True, type='str'), pn_network_ip=dict(required=True, type='str'), pn_ospf_area=dict(type='str') ), required_if=( ['state', 'present', ['pn_network_ip', 'pn_ospf_area']], ['state', 'absent', ['pn_network_ip']] ) ) # Accessing the arguments state = module.params['state'] vrouter_name = module.params['pn_vrouter_name'] network_ip = module.params['pn_network_ip'] ospf_area = module.params['pn_ospf_area'] command = get_command_from_state(state) # Building the CLI command string cli = pn_cli(module) check_cli(module, cli) if state == 'present': if VROUTER_EXISTS is False: module.exit_json( skipped=True, msg='vRouter %s does not exist' % vrouter_name ) if NETWORK_EXISTS is True: module.exit_json( skipped=True, msg=('OSPF with network ip %s already exists on %s' % (network_ip, vrouter_name)) ) cli += (' %s vrouter-name %s network %s ospf-area %s' % (command, vrouter_name, network_ip, ospf_area)) if state == 'absent': if VROUTER_EXISTS is False: module.exit_json( skipped=True, msg='vRouter %s does not exist' % vrouter_name ) if NETWORK_EXISTS is False: module.exit_json( skipped=True, msg=('OSPF with network ip %s already exists on %s' % (network_ip, vrouter_name)) ) cli += (' %s vrouter-name %s network %s' % (command, vrouter_name, network_ip)) run_cli(module, cli) # AnsibleModule boilerplate from ansible.module_utils.basic import AnsibleModule if __name__ == '__main__': main()
gpl-3.0
slank/ansible
lib/ansible/plugins/lookup/csvfile.py
63
3508
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import codecs import csv from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.module_utils._text import to_bytes, to_native, to_text class CSVRecoder: """ Iterator that reads an encoded stream and reencodes the input to UTF-8 """ def __init__(self, f, encoding='utf-8'): self.reader = codecs.getreader(encoding)(f) def __iter__(self): return self def next(self): return self.reader.next().encode("utf-8") class CSVReader: """ A CSV reader which will iterate over lines in the CSV file "f", which is encoded in the given encoding. 
""" def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds): f = CSVRecoder(f, encoding) self.reader = csv.reader(f, dialect=dialect, **kwds) def next(self): row = self.reader.next() return [to_text(s) for s in row] def __iter__(self): return self class LookupModule(LookupBase): def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1): try: f = open(filename, 'r') creader = CSVReader(f, delimiter=to_bytes(delimiter), encoding=encoding) for row in creader: if row[0] == key: return row[int(col)] except Exception as e: raise AnsibleError("csvfile: %s" % to_native(e)) return dflt def run(self, terms, variables=None, **kwargs): ret = [] for term in terms: params = term.split() key = params[0] paramvals = { 'col' : "1", # column to return 'default' : None, 'delimiter' : "TAB", 'file' : 'ansible.csv', 'encoding' : 'utf-8', } # parameters specified? try: for param in params[1:]: name, value = param.split('=') assert(name in paramvals) paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) if paramvals['delimiter'] == 'TAB': paramvals['delimiter'] = "\t" lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file']) var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col']) if var is not None: if type(var) is list: for v in var: ret.append(v) else: ret.append(var) return ret
gpl-3.0
fergalbyrne/nupic
tests/unit/nupic/regions/anomaly_region_test.py
27
4336
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2015, Numenta, Inc. Unless you have purchased from # Numenta, Inc. a separate commercial license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import tempfile import unittest import numpy from nupic.regions.AnomalyRegion import AnomalyRegion try: import capnp except ImportError: capnp = None if capnp: from nupic.regions.AnomalyRegion_capnp import AnomalyRegionProto class AnomalyRegionTest(unittest.TestCase): """Tests for anomaly region""" @unittest.skipUnless( capnp, "pycapnp is not installed, skipping serialization test.") def testWriteRead(self): predictedColumns = [[0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]] activeColumns = [[0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [0, 1 ,0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0 ,0, 0, 0, 0, 
1, 0, 0, 0, 1, 1, 0], [1, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0 ,0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [0, 1 ,1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]] anomalyExpected = (1.0, 0.25, 1.0/3.0, 2.0/3.0, 1.0, 2.0/3.0, 1.0, 0.0, 0.25, 0.25) anomalyRegion1 = AnomalyRegion() inputs = AnomalyRegion.getSpec()['inputs'] outputs = AnomalyRegion.getSpec()['outputs'] for i in xrange(0, 6): inputs['predictedColumns'] = numpy.array(predictedColumns[i]) inputs['activeColumns'] = numpy.array(activeColumns[i]) anomalyRegion1.compute(inputs, outputs) proto1 = AnomalyRegionProto.new_message() anomalyRegion1.write(proto1) # Write the proto to a temp file and read it back into a new proto with tempfile.TemporaryFile() as f: proto1.write(f) f.seek(0) proto2 = AnomalyRegionProto.read(f) # Load the deserialized proto anomalyRegion2 = AnomalyRegion.read(proto2) self.assertEqual(anomalyRegion1, anomalyRegion2) for i in xrange(6, 10): inputs['predictedColumns'] = numpy.array(predictedColumns[i]) inputs['activeColumns'] = numpy.array(activeColumns[i]) anomalyRegion1.compute(inputs, outputs) score1 = outputs['rawAnomalyScore'][0] anomalyRegion2.compute(inputs, outputs) score2 = outputs['rawAnomalyScore'][0] self.assertAlmostEqual( score1, anomalyExpected[i], places=5, msg="Anomaly score of %f doesn't match expected of %f" % ( score1, anomalyExpected[i])) self.assertAlmostEqual( score2, anomalyExpected[i], places=5, msg="Anomaly score of %f doesn't match expected of %f" % ( score2, anomalyExpected[i])) if __name__ == "__main__": unittest.main()
agpl-3.0
SivilTaram/edx-platform
cms/djangoapps/contentstore/views/tests/test_library.py
114
9392
""" Unit tests for contentstore.views.library More important high-level tests are in contentstore/tests/test_libraries.py """ from contentstore.tests.utils import AjaxEnabledTestClient, parse_json from contentstore.utils import reverse_course_url, reverse_library_url from contentstore.views.component import get_component_templates from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import LibraryFactory from mock import patch from opaque_keys.edx.locator import CourseKey, LibraryLocator import ddt from student.roles import LibraryUserRole LIBRARY_REST_URL = '/library/' # URL for GET/POST requests involving libraries def make_url_for_lib(key): """ Get the RESTful/studio URL for testing the given library """ if isinstance(key, LibraryLocator): key = unicode(key) return LIBRARY_REST_URL + key @ddt.ddt class UnitTestLibraries(ModuleStoreTestCase): """ Unit tests for library views """ def setUp(self): user_password = super(UnitTestLibraries, self).setUp() self.client = AjaxEnabledTestClient() self.client.login(username=self.user.username, password=user_password) ###################################################### # Tests for /library/ - list and create libraries: @patch("contentstore.views.library.LIBRARIES_ENABLED", False) def test_with_libraries_disabled(self): """ The library URLs should return 404 if libraries are disabled. """ response = self.client.get_json(LIBRARY_REST_URL) self.assertEqual(response.status_code, 404) def test_list_libraries(self): """ Test that we can GET /library/ to list all libraries visible to the current user. 
""" # Create some more libraries libraries = [LibraryFactory.create() for _ in range(3)] lib_dict = dict([(lib.location.library_key, lib) for lib in libraries]) response = self.client.get_json(LIBRARY_REST_URL) self.assertEqual(response.status_code, 200) lib_list = parse_json(response) self.assertEqual(len(lib_list), len(libraries)) for entry in lib_list: self.assertIn("library_key", entry) self.assertIn("display_name", entry) key = CourseKey.from_string(entry["library_key"]) self.assertIn(key, lib_dict) self.assertEqual(entry["display_name"], lib_dict[key].display_name) del lib_dict[key] # To ensure no duplicates are matched @ddt.data("delete", "put") def test_bad_http_verb(self, verb): """ We should get an error if we do weird requests to /library/ """ response = getattr(self.client, verb)(LIBRARY_REST_URL) self.assertEqual(response.status_code, 405) def test_create_library(self): """ Create a library. """ response = self.client.ajax_post(LIBRARY_REST_URL, { 'org': 'org', 'library': 'lib', 'display_name': "New Library", }) self.assertEqual(response.status_code, 200) # That's all we check. More detailed tests are in contentstore.tests.test_libraries... @patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}) def test_lib_create_permission(self): """ Users who are not given course creator roles should still be able to create libraries. 
""" self.client.logout() ns_user, password = self.create_non_staff_user() self.client.login(username=ns_user.username, password=password) response = self.client.ajax_post(LIBRARY_REST_URL, { 'org': 'org', 'library': 'lib', 'display_name': "New Library", }) self.assertEqual(response.status_code, 200) @ddt.data( {}, {'org': 'org'}, {'library': 'lib'}, {'org': 'C++', 'library': 'lib', 'display_name': 'Lib with invalid characters in key'}, {'org': 'Org', 'library': 'Wh@t?', 'display_name': 'Lib with invalid characters in key'}, ) def test_create_library_invalid(self, data): """ Make sure we are prevented from creating libraries with invalid keys/data """ response = self.client.ajax_post(LIBRARY_REST_URL, data) self.assertEqual(response.status_code, 400) def test_no_duplicate_libraries(self): """ We should not be able to create multiple libraries with the same key """ lib = LibraryFactory.create() lib_key = lib.location.library_key response = self.client.ajax_post(LIBRARY_REST_URL, { 'org': lib_key.org, 'library': lib_key.library, 'display_name': "A Duplicate key, same as 'lib'", }) self.assertIn('already a library defined', parse_json(response)['ErrMsg']) self.assertEqual(response.status_code, 400) ###################################################### # Tests for /library/:lib_key/ - get a specific library as JSON or HTML editing view def test_get_lib_info(self): """ Test that we can get data about a library (in JSON format) using /library/:key/ """ # Create a library lib_key = LibraryFactory.create().location.library_key # Re-load the library from the modulestore, explicitly including version information: lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False) version = lib.location.library_key.version_guid self.assertNotEqual(version, None) response = self.client.get_json(make_url_for_lib(lib_key)) self.assertEqual(response.status_code, 200) info = parse_json(response) self.assertEqual(info['display_name'], lib.display_name) 
self.assertEqual(info['library_id'], unicode(lib_key)) self.assertEqual(info['previous_version'], None) self.assertNotEqual(info['version'], None) self.assertNotEqual(info['version'], '') self.assertEqual(info['version'], unicode(version)) def test_get_lib_edit_html(self): """ Test that we can get the studio view for editing a library using /library/:key/ """ lib = LibraryFactory.create() response = self.client.get(make_url_for_lib(lib.location.library_key)) self.assertEqual(response.status_code, 200) self.assertIn("<html", response.content) self.assertIn(lib.display_name, response.content) @ddt.data('library-v1:Nonexistent+library', 'course-v1:Org+Course', 'course-v1:Org+Course+Run', 'invalid') def test_invalid_keys(self, key_str): """ Check that various Nonexistent/invalid keys give 404 errors """ response = self.client.get_json(make_url_for_lib(key_str)) self.assertEqual(response.status_code, 404) def test_bad_http_verb_with_lib_key(self): """ We should get an error if we do weird requests to /library/ """ lib = LibraryFactory.create() for verb in ("post", "delete", "put"): response = getattr(self.client, verb)(make_url_for_lib(lib.location.library_key)) self.assertEqual(response.status_code, 405) def test_no_access(self): user, password = self.create_non_staff_user() self.client.login(username=user, password=password) lib = LibraryFactory.create() response = self.client.get(make_url_for_lib(lib.location.library_key)) self.assertEqual(response.status_code, 403) def test_get_component_templates(self): """ Verify that templates for adding discussion and advanced components to content libraries are not provided. 
""" lib = LibraryFactory.create() lib.advanced_modules = ['lti'] lib.save() templates = [template['type'] for template in get_component_templates(lib, library=True)] self.assertIn('problem', templates) self.assertNotIn('discussion', templates) self.assertNotIn('advanced', templates) def test_manage_library_users(self): """ Simple test that the Library "User Access" view works. Also tests that we can use the REST API to assign a user to a library. """ library = LibraryFactory.create() extra_user, _ = self.create_non_staff_user() manage_users_url = reverse_library_url('manage_library_users', unicode(library.location.library_key)) response = self.client.get(manage_users_url) self.assertEqual(response.status_code, 200) # extra_user has not been assigned to the library so should not show up in the list: self.assertNotIn(extra_user.username, response.content) # Now add extra_user to the library: user_details_url = reverse_course_url( 'course_team_handler', library.location.library_key, kwargs={'email': extra_user.email} ) edit_response = self.client.ajax_post(user_details_url, {"role": LibraryUserRole.ROLE}) self.assertIn(edit_response.status_code, (200, 204)) # Now extra_user should apear in the list: response = self.client.get(manage_users_url) self.assertEqual(response.status_code, 200) self.assertIn(extra_user.username, response.content)
agpl-3.0
Salat-Cx65/python-for-android
python-build/python-libs/gdata/build/lib/atom/mock_http_core.py
135
11149
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. __author__ = 'j.s@google.com (Jeff Scudder)' import StringIO import pickle import os.path import tempfile import atom.http_core class MockHttpClient(object): debug = None real_client = None # The following members are used to construct the session cache temp file # name. # These are combined to form the file name # /tmp/cache_prefix.cache_case_name.cache_test_name cache_name_prefix = 'gdata_live_test' cache_case_name = '' cache_test_name = '' def __init__(self, recordings=None, real_client=None): self._recordings = recordings or [] if real_client is not None: self.real_client = real_client def add_response(self, http_request, status, reason, headers=None, body=None): response = MockHttpResponse(status, reason, headers, body) # TODO Scrub the request and the response. self._recordings.append((http_request._copy(), response)) AddResponse = add_response def request(self, http_request): """Provide a recorded response, or record a response for replay. If the real_client is set, the request will be made using the real_client, and the response from the server will be recorded. If the real_client is None (the default), this method will examine the recordings and find the first which matches. 
""" request = http_request._copy() _scrub_request(request) if self.real_client is None: for recording in self._recordings: if _match_request(recording[0], request): return recording[1] else: # Pass along the debug settings to the real client. self.real_client.debug = self.debug # Make an actual request since we can use the real HTTP client. response = self.real_client.request(http_request) _scrub_response(response) self.add_response(request, response.status, response.reason, dict(response.getheaders()), response.read()) # Return the recording which we just added. return self._recordings[-1][1] return None Request = request def _save_recordings(self, filename): recording_file = open(os.path.join(tempfile.gettempdir(), filename), 'wb') pickle.dump(self._recordings, recording_file) def _load_recordings(self, filename): recording_file = open(os.path.join(tempfile.gettempdir(), filename), 'rb') self._recordings = pickle.load(recording_file) def _delete_recordings(self, filename): full_path = os.path.join(tempfile.gettempdir(), filename) if os.path.exists(full_path): os.remove(full_path) def _load_or_use_client(self, filename, http_client): if os.path.exists(os.path.join(tempfile.gettempdir(), filename)): self._load_recordings(filename) else: self.real_client = http_client def use_cached_session(self, name=None, real_http_client=None): """Attempts to load recordings from a previous live request. If a temp file with the recordings exists, then it is used to fulfill requests. If the file does not exist, then a real client is used to actually make the desired HTTP requests. Requests and responses are recorded and will be written to the desired temprary cache file when close_session is called. Args: name: str (optional) The file name of session file to be used. The file is loaded from the temporary directory of this machine. If no name is passed in, a default name will be constructed using the cache_name_prefix, cache_case_name, and cache_test_name of this object. 
real_http_client: atom.http_core.HttpClient the real client to be used if the cached recordings are not found. If the default value is used, this will be an atom.http_core.HttpClient. """ if real_http_client is None: real_http_client = atom.http_core.HttpClient() if name is None: self._recordings_cache_name = self.get_cache_file_name() else: self._recordings_cache_name = name self._load_or_use_client(self._recordings_cache_name, real_http_client) def close_session(self): """Saves recordings in the temporary file named in use_cached_session.""" if self.real_client is not None: self._save_recordings(self._recordings_cache_name) def delete_session(self, name=None): """Removes recordings from a previous live request.""" if name is None: self._delete_recordings(self._recordings_cache_name) else: self._delete_recordings(name) def get_cache_file_name(self): return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name, self.cache_test_name) def _match_request(http_request, stored_request): """Determines whether a request is similar enough to a stored request to cause the stored response to be returned.""" # Check to see if the host names match. if (http_request.uri.host is not None and http_request.uri.host != stored_request.uri.host): return False # Check the request path in the URL (/feeds/private/full/x) elif http_request.uri.path != stored_request.uri.path: return False # Check the method used in the request (GET, POST, etc.) elif http_request.method != stored_request.method: return False # If there is a gsession ID in either request, make sure that it is matched # exactly. 
elif ('gsessionid' in http_request.uri.query or 'gsessionid' in stored_request.uri.query): if 'gsessionid' not in stored_request.uri.query: return False elif 'gsessionid' not in http_request.uri.query: return False elif (http_request.uri.query['gsessionid'] != stored_request.uri.query['gsessionid']): return False # Ignores differences in the query params (?start-index=5&max-results=20), # the body of the request, the port number, HTTP headers, just to name a # few. return True def _scrub_request(http_request): """ Removes email address and password from a client login request. Since the mock server saves the request and response in plantext, sensitive information like the password should be removed before saving the recordings. At the moment only requests sent to a ClientLogin url are scrubbed. """ if (http_request and http_request.uri and http_request.uri.path and http_request.uri.path.endswith('ClientLogin')): # Remove the email and password from a ClientLogin request. http_request._body_parts = [] http_request.add_form_inputs( {'form_data': 'client login request has been scrubbed'}) else: # We can remove the body of the post from the recorded request, since # the request body is not used when finding a matching recording. http_request._body_parts = [] return http_request def _scrub_response(http_response): return http_response class EchoHttpClient(object): """Sends the request data back in the response. Used to check the formatting of the request as it was sent. Always responds with a 200 OK, and some information from the HTTP request is returned in special Echo-X headers in the response. The following headers are added in the response: 'Echo-Host': The host name and port number to which the HTTP connection is made. If no port was passed in, the header will contain host:None. 'Echo-Uri': The path portion of the URL being requested. 
/example?x=1&y=2 'Echo-Scheme': The beginning of the URL, usually 'http' or 'https' 'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc. """ def request(self, http_request): return self._http_request(http_request.uri, http_request.method, http_request.headers, http_request._body_parts) def _http_request(self, uri, method, headers=None, body_parts=None): body = StringIO.StringIO() response = atom.http_core.HttpResponse(status=200, reason='OK', body=body) if headers is None: response._headers = {} else: # Copy headers from the request to the response but convert values to # strings. Server response headers always come in as strings, so an int # should be converted to a corresponding string when echoing. for header, value in headers.iteritems(): response._headers[header] = str(value) response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port)) response._headers['Echo-Uri'] = uri._get_relative_path() response._headers['Echo-Scheme'] = uri.scheme response._headers['Echo-Method'] = method for part in body_parts: if isinstance(part, str): body.write(part) elif hasattr(part, 'read'): body.write(part.read()) body.seek(0) return response class SettableHttpClient(object): """An HTTP Client which responds with the data given in set_response.""" def __init__(self, status, reason, body, headers): """Configures the response for the server. See set_response for details on the arguments to the constructor. """ self.set_response(status, reason, body, headers) self.last_request = None def set_response(self, status, reason, body, headers): """Determines the response which will be sent for each request. Args: status: An int for the HTTP status code, example: 200, 404, etc. reason: String for the HTTP reason, example: OK, NOT FOUND, etc. body: The body of the HTTP response as a string or a file-like object (something with a read method). headers: dict of strings containing the HTTP headers in the response. 
""" self.response = atom.http_core.HttpResponse(status=status, reason=reason, body=body) self.response._headers = headers.copy() def request(self, http_request): self.last_request = http_request return self.response class MockHttpResponse(atom.http_core.HttpResponse): def __init__(self, status=None, reason=None, headers=None, body=None): self._headers = headers or {} if status is not None: self.status = status if reason is not None: self.reason = reason if body is not None: # Instead of using a file-like object for the body, store as a string # so that reads can be repeated. if hasattr(body, 'read'): self._body = body.read() else: self._body = body def read(self): return self._body
apache-2.0
qrkourier/ansible
lib/ansible/plugins/terminal/iosxr.py
49
1889
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import json from ansible.plugins.terminal import TerminalBase from ansible.errors import AnsibleConnectionFailure class TerminalModule(TerminalBase): terminal_stdout_re = [ re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"), re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"), re.compile(br']]>]]>[\r\n]?') ] terminal_stderr_re = [ re.compile(br"% ?Error"), re.compile(br"% ?Bad secret"), re.compile(br"invalid input", re.I), re.compile(br"(?:incomplete|ambiguous) command", re.I), re.compile(br"connection timed out", re.I), re.compile(br"[^\r\n]+ not found", re.I), re.compile(br"'[^']' +returned error code: ?\d+"), re.compile(br"Failed to commit", re.I) ] def on_open_shell(self): try: for cmd in (b'terminal length 0', b'terminal width 512', b'terminal exec prompt no-timestamp'): self._exec_cli_command(cmd) except AnsibleConnectionFailure: raise AnsibleConnectionFailure('unable to set terminal parameters')
gpl-3.0
pomegranited/edx-platform
lms/djangoapps/student_account/test/test_views.py
20
20748
# -*- coding: utf-8 -*- """ Tests for student account views. """ import re from unittest import skipUnless from urllib import urlencode import json import mock import ddt from django.conf import settings from django.core.urlresolvers import reverse from django.core import mail from django.contrib import messages from django.contrib.messages.middleware import MessageMiddleware from django.test import TestCase from django.test.utils import override_settings from django.http import HttpRequest from course_modes.models import CourseMode from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH from openedx.core.lib.js_utils import escape_json_dumps from student.tests.factories import UserFactory from student_account.views import account_settings_context from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin from util.testing import UrlResetMixin from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase @ddt.ddt class StudentAccountUpdateTest(UrlResetMixin, TestCase): """ Tests for the student account views that update the user's account information. 
""" USERNAME = u"heisenberg" ALTERNATE_USERNAME = u"walt" OLD_PASSWORD = u"ḅḷüëṡḳÿ" NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴" OLD_EMAIL = u"walter@graymattertech.com" NEW_EMAIL = u"walt@savewalterwhite.com" INVALID_ATTEMPTS = 100 INVALID_EMAILS = [ None, u"", u"a", "no_domain", "no+domain", "@", "@domain.com", "test@no_extension", # Long email -- subtract the length of the @domain # except for one character (so we exceed the max length limit) u"{user}@example.com".format( user=(u'e' * (EMAIL_MAX_LENGTH - 11)) ) ] INVALID_KEY = u"123abc" def setUp(self): super(StudentAccountUpdateTest, self).setUp("student_account.urls") # Create/activate a new account activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL) activate_account(activation_key) # Login result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD) self.assertTrue(result) @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS') def test_password_change(self): # Request a password change while logged in, simulating # use of the password reset link from the account page response = self._change_password() self.assertEqual(response.status_code, 200) # Check that an email was sent self.assertEqual(len(mail.outbox), 1) # Retrieve the activation link from the email body email_body = mail.outbox[0].body result = re.search('(?P<url>https?://[^\s]+)', email_body) self.assertIsNot(result, None) activation_link = result.group('url') # Visit the activation link response = self.client.get(activation_link) self.assertEqual(response.status_code, 200) # Submit a new password and follow the redirect to the success page response = self.client.post( activation_link, # These keys are from the form on the current password reset confirmation page. 
{'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD}, follow=True ) self.assertEqual(response.status_code, 200) self.assertContains(response, "Your password has been set.") # Log the user out to clear session data self.client.logout() # Verify that the new password can be used to log in result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD) self.assertTrue(result) # Try reusing the activation link to change the password again response = self.client.post( activation_link, {'new_password1': self.OLD_PASSWORD, 'new_password2': self.OLD_PASSWORD}, follow=True ) self.assertEqual(response.status_code, 200) self.assertContains(response, "The password reset link was invalid, possibly because the link has already been used.") self.client.logout() # Verify that the old password cannot be used to log in result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD) self.assertFalse(result) # Verify that the new password continues to be valid result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD) self.assertTrue(result) @ddt.data(True, False) def test_password_change_logged_out(self, send_email): # Log the user out self.client.logout() # Request a password change while logged out, simulating # use of the password reset link from the login page if send_email: response = self._change_password(email=self.OLD_EMAIL) self.assertEqual(response.status_code, 200) else: # Don't send an email in the POST data, simulating # its (potentially accidental) omission in the POST # data sent from the login page response = self._change_password() self.assertEqual(response.status_code, 400) def test_password_change_inactive_user(self): # Log out the user created during test setup self.client.logout() # Create a second user, but do not activate it create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL) # Send the view the email address tied to the inactive user response = 
self._change_password(email=self.NEW_EMAIL) # Expect that the activation email is still sent, # since the user may have lost the original activation email. self.assertEqual(response.status_code, 200) self.assertEqual(len(mail.outbox), 1) def test_password_change_no_user(self): # Log out the user created during test setup self.client.logout() # Send the view an email address not tied to any user response = self._change_password(email=self.NEW_EMAIL) self.assertEqual(response.status_code, 400) def test_password_change_rate_limited(self): # Log out the user created during test setup, to prevent the view from # selecting the logged-in user's email address over the email provided # in the POST data self.client.logout() # Make many consecutive bad requests in an attempt to trigger the rate limiter for attempt in xrange(self.INVALID_ATTEMPTS): self._change_password(email=self.NEW_EMAIL) response = self._change_password(email=self.NEW_EMAIL) self.assertEqual(response.status_code, 403) @ddt.data( ('post', 'password_change_request', []), ) @ddt.unpack def test_require_http_method(self, correct_method, url_name, args): wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method} url = reverse(url_name, args=args) for method in wrong_methods: response = getattr(self.client, method)(url) self.assertEqual(response.status_code, 405) def _change_password(self, email=None): """Request to change the user's password. """ data = {} if email: data['email'] = email return self.client.post(path=reverse('password_change_request'), data=data) @ddt.ddt class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase): """ Tests for the student account views that update the user's account information. 
""" USERNAME = "bob" EMAIL = "bob@example.com" PASSWORD = "password" @mock.patch.dict(settings.FEATURES, {'EMBARGO': True}) def setUp(self): super(StudentAccountLoginAndRegistrationTest, self).setUp('embargo') # For these tests, two third party auth providers are enabled by default: self.configure_google_provider(enabled=True) self.configure_facebook_provider(enabled=True) @ddt.data( ("signin_user", "login"), ("register_user", "register"), ) @ddt.unpack def test_login_and_registration_form(self, url_name, initial_mode): response = self.client.get(reverse(url_name)) expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode) self.assertContains(response, expected_data) @ddt.data("signin_user", "register_user") def test_login_and_registration_form_already_authenticated(self, url_name): # Create/activate a new account and log in activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL) activate_account(activation_key) result = self.client.login(username=self.USERNAME, password=self.PASSWORD) self.assertTrue(result) # Verify that we're redirected to the dashboard response = self.client.get(reverse(url_name)) self.assertRedirects(response, reverse("dashboard")) @ddt.data( (False, "signin_user"), (False, "register_user"), (True, "signin_user"), (True, "register_user"), ) @ddt.unpack def test_login_and_registration_form_signin_preserves_params(self, is_edx_domain, url_name): params = [ ('course_id', 'edX/DemoX/Demo_Course'), ('enrollment_action', 'enroll'), ] # The response should have a "Sign In" button with the URL # that preserves the querystring params with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}): response = self.client.get(reverse(url_name), params) expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')])) self.assertContains(response, expected_url) # Add additional parameters: params = [ ('course_id', 'edX/DemoX/Demo_Course'), ('enrollment_action', 'enroll'), 
('course_mode', CourseMode.DEFAULT_MODE_SLUG), ('email_opt_in', 'true'), ('next', '/custom/final/destination') ] # Verify that this parameter is also preserved with mock.patch.dict(settings.FEATURES, {'IS_EDX_DOMAIN': is_edx_domain}): response = self.client.get(reverse(url_name), params) expected_url = '/login?{}'.format(self._finish_auth_url_param(params)) self.assertContains(response, expected_url) @mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False}) @ddt.data("signin_user", "register_user") def test_third_party_auth_disabled(self, url_name): response = self.client.get(reverse(url_name)) self._assert_third_party_auth_data(response, None, None, []) @ddt.data( ("signin_user", None, None), ("register_user", None, None), ("signin_user", "google-oauth2", "Google"), ("register_user", "google-oauth2", "Google"), ("signin_user", "facebook", "Facebook"), ("register_user", "facebook", "Facebook"), ) @ddt.unpack def test_third_party_auth(self, url_name, current_backend, current_provider): params = [ ('course_id', 'course-v1:Org+Course+Run'), ('enrollment_action', 'enroll'), ('course_mode', CourseMode.DEFAULT_MODE_SLUG), ('email_opt_in', 'true'), ('next', '/custom/final/destination'), ] # Simulate a running pipeline if current_backend is not None: pipeline_target = "student_account.views.third_party_auth.pipeline" with simulate_running_pipeline(pipeline_target, current_backend): response = self.client.get(reverse(url_name), params) # Do NOT simulate a running pipeline else: response = self.client.get(reverse(url_name), params) # This relies on the THIRD_PARTY_AUTH configuration in the test settings expected_providers = [ { "id": "oa2-facebook", "name": "Facebook", "iconClass": "fa-facebook", "loginUrl": self._third_party_login_url("facebook", "login", params), "registerUrl": self._third_party_login_url("facebook", "register", params) }, { "id": "oa2-google-oauth2", "name": "Google", "iconClass": "fa-google-plus", "loginUrl": 
self._third_party_login_url("google-oauth2", "login", params), "registerUrl": self._third_party_login_url("google-oauth2", "register", params) } ] self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers) def test_hinted_login(self): params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")] response = self.client.get(reverse('signin_user'), params) self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"') @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME) def test_microsite_uses_old_login_page(self): # Retrieve the login page from a microsite domain # and verify that we're served the old page. resp = self.client.get( reverse("signin_user"), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertContains(resp, "Log into your Test Microsite Account") self.assertContains(resp, "login-form") def test_microsite_uses_old_register_page(self): # Retrieve the register page from a microsite domain # and verify that we're served the old page. resp = self.client.get( reverse("register_user"), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertContains(resp, "Register for Test Microsite") self.assertContains(resp, "register-form") def test_login_registration_xframe_protected(self): resp = self.client.get( reverse("register_user"), {}, HTTP_REFERER="http://localhost/iframe" ) self.assertEqual(resp['X-Frame-Options'], 'DENY') self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True) resp = self.client.get( reverse("register_user"), HTTP_REFERER="http://localhost/iframe" ) self.assertEqual(resp['X-Frame-Options'], 'ALLOW') def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers): """Verify that third party auth info is rendered correctly in a DOM data attribute. """ finish_auth_url = None if current_backend: finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?" 
auth_info = { "currentProvider": current_provider, "providers": providers, "secondaryProviders": [], "finishAuthUrl": finish_auth_url, "errorMessage": None, } auth_info = escape_json_dumps(auth_info) expected_data = '"third_party_auth": {auth_info}'.format( auth_info=auth_info ) self.assertContains(response, expected_data) def _third_party_login_url(self, backend_name, auth_entry, login_params): """Construct the login URL to start third party authentication. """ return u"{url}?auth_entry={auth_entry}&{param_str}".format( url=reverse("social:begin", kwargs={"backend": backend_name}), auth_entry=auth_entry, param_str=self._finish_auth_url_param(login_params), ) def _finish_auth_url_param(self, params): """ Make the next=... URL parameter that indicates where the user should go next. >>> _finish_auth_url_param([('next', '/dashboard')]) '/account/finish_auth?next=%2Fdashboard' """ return urlencode({ 'next': '/account/finish_auth?{}'.format(urlencode(params)) }) class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase): """ Tests for the account settings view. """ USERNAME = 'student' PASSWORD = 'password' FIELDS = [ 'country', 'gender', 'language', 'level_of_education', 'password', 'year_of_birth', 'preferred_language', ] @mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage') def setUp(self): super(AccountSettingsViewTest, self).setUp() self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD) self.client.login(username=self.USERNAME, password=self.PASSWORD) self.request = HttpRequest() self.request.user = self.user # For these tests, two third party auth providers are enabled by default: self.configure_google_provider(enabled=True) self.configure_facebook_provider(enabled=True) # Python-social saves auth failure notifcations in Django messages. # See pipeline.get_duplicate_provider() for details. 
self.request.COOKIES = {} MessageMiddleware().process_request(self.request) messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook') def test_context(self): context = account_settings_context(self.request) user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username}) self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url) user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username}) self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url) for attribute in self.FIELDS: self.assertIn(attribute, context['fields']) self.assertEqual( context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username}) ) self.assertEqual( context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username}) ) self.assertEqual(context['duplicate_provider'], 'facebook') self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook') self.assertEqual(context['auth']['providers'][1]['name'], 'Google') def test_view(self): view_path = reverse('account_settings') response = self.client.get(path=view_path) for attribute in self.FIELDS: self.assertIn(attribute, response.content) @override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME) class MicrositeLogistrationTests(TestCase): """ Test to validate that microsites can display the logistration page """ def test_login_page(self): """ Make sure that we get the expected logistration page on our specialized microsite """ resp = self.client.get( reverse('signin_user'), HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME ) self.assertEqual(resp.status_code, 200) self.assertIn('<div id="login-and-registration-container"', resp.content) def test_registration_page(self): """ Make sure that we get the expected logistration page on our specialized microsite """ resp = self.client.get( reverse('register_user'), 
HTTP_HOST=settings.MICROSITE_LOGISTRATION_HOSTNAME ) self.assertEqual(resp.status_code, 200) self.assertIn('<div id="login-and-registration-container"', resp.content) @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME) def test_no_override(self): """ Make sure we get the old style login/registration if we don't override """ resp = self.client.get( reverse('signin_user'), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertEqual(resp.status_code, 200) self.assertNotIn('<div id="login-and-registration-container"', resp.content) resp = self.client.get( reverse('register_user'), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME ) self.assertEqual(resp.status_code, 200) self.assertNotIn('<div id="login-and-registration-container"', resp.content)
agpl-3.0
Freso/botbot-web
botbot/apps/kudos/migrations/0001_initial.py
2
11323
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Kudos' db.create_table(u'kudos_kudos', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('nick', self.gf('django.db.models.fields.CharField')(max_length=255)), ('channel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['bots.Channel'])), ('count', self.gf('django.db.models.fields.PositiveIntegerField')()), ('first', self.gf('django.db.models.fields.DateTimeField')()), ('recent', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal(u'kudos', ['Kudos']) # Adding unique constraint on 'Kudos', fields ['nick', 'channel'] db.create_unique(u'kudos_kudos', ['nick', 'channel_id']) # Adding model 'KudosTotal' db.create_table(u'kudos_kudostotal', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('channel', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['bots.Channel'], unique=True)), ('kudos_given', self.gf('django.db.models.fields.PositiveIntegerField')()), ('message_count', self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal(u'kudos', ['KudosTotal']) def backwards(self, orm): # Removing unique constraint on 'Kudos', fields ['nick', 'channel'] db.delete_unique(u'kudos_kudos', ['nick', 'channel_id']) # Deleting model 'Kudos' db.delete_table(u'kudos_kudos') # Deleting model 'KudosTotal' db.delete_table(u'kudos_kudostotal') models = { u'accounts.membership': { 'Meta': {'unique_together': "(('user', 'channel'),)", 'object_name': 'Membership'}, 'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bots.Channel']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], 
{'auto_now': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_owner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'kind': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '30'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"}) }, u'accounts.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'nick': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'timezone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'bots.channel': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('slug', 'chatbot'), ('name', 'chatbot'))", 'object_name': 'Channel'}, 'chatbot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bots.ChatBot']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'fingerprint': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '250', 
'null': 'True', 'blank': 'True'}), 'plugins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['plugins.Plugin']", 'through': u"orm['plugins.ActivePlugin']", 'symmetrical': 'False'}), 'private_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'public_kudos': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.User']", 'through': u"orm['accounts.Membership']", 'symmetrical': 'False'}) }, u'bots.chatbot': { 'Meta': {'object_name': 'ChatBot'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'max_channels': ('django.db.models.fields.IntegerField', [], {'default': '200'}), 'nick': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'real_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'server': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'server_identifier': ('django.db.models.fields.CharField', [], {'max_length': '164'}), 'server_password': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'kudos.kudos': { 'Meta': {'unique_together': "((u'nick', u'channel'),)", 'object_name': 'Kudos'}, 'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bots.Channel']"}), 'count': ('django.db.models.fields.PositiveIntegerField', [], {}), 'first': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nick': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'recent': ('django.db.models.fields.DateTimeField', [], {}) }, u'kudos.kudostotal': { 'Meta': {'object_name': 'KudosTotal'}, 'channel': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bots.Channel']", 'unique': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kudos_given': ('django.db.models.fields.PositiveIntegerField', [], {}), 'message_count': ('django.db.models.fields.PositiveIntegerField', [], {}) }, u'plugins.activeplugin': { 'Meta': {'object_name': 'ActivePlugin'}, 'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bots.Channel']"}), 'configuration': ('botbot.core.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'plugin': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['plugins.Plugin']"}) }, u'plugins.plugin': { 'Meta': {'object_name': 'Plugin'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) } } complete_apps = ['kudos']
mit
dellysunnymtech/bitbake
lib/bb/cache_extra.py
14
3113
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- # # Extra RecipeInfo will be all defined in this file. Currently, # Only Hob (Image Creator) Requests some extra fields. So # HobRecipeInfo is defined. It's named HobRecipeInfo because it # is introduced by 'hob'. Users could also introduce other # RecipeInfo or simply use those already defined RecipeInfo. # In the following patch, this newly defined new extra RecipeInfo # will be dynamically loaded and used for loading/saving the extra # cache fields # Copyright (C) 2011, Intel Corporation. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
from bb.cache import RecipeInfoCommon class HobRecipeInfo(RecipeInfoCommon): __slots__ = () classname = "HobRecipeInfo" # please override this member with the correct data cache file # such as (bb_cache.dat, bb_extracache_hob.dat) cachefile = "bb_extracache_" + classname +".dat" # override this member with the list of extra cache fields # that this class will provide cachefields = ['summary', 'license', 'section', 'description', 'homepage', 'bugtracker', 'prevision', 'files_info'] def __init__(self, filename, metadata): self.summary = self.getvar('SUMMARY', metadata) self.license = self.getvar('LICENSE', metadata) self.section = self.getvar('SECTION', metadata) self.description = self.getvar('DESCRIPTION', metadata) self.homepage = self.getvar('HOMEPAGE', metadata) self.bugtracker = self.getvar('BUGTRACKER', metadata) self.prevision = self.getvar('PR', metadata) self.files_info = self.getvar('FILES_INFO', metadata) @classmethod def init_cacheData(cls, cachedata): # CacheData in Hob RecipeInfo Class cachedata.summary = {} cachedata.license = {} cachedata.section = {} cachedata.description = {} cachedata.homepage = {} cachedata.bugtracker = {} cachedata.prevision = {} cachedata.files_info = {} def add_cacheData(self, cachedata, fn): cachedata.summary[fn] = self.summary cachedata.license[fn] = self.license cachedata.section[fn] = self.section cachedata.description[fn] = self.description cachedata.homepage[fn] = self.homepage cachedata.bugtracker[fn] = self.bugtracker cachedata.prevision[fn] = self.prevision cachedata.files_info[fn] = self.files_info
gpl-2.0
followloda/PornGuys
FlaskServer/venv/Lib/encodings/__init__.py
406
5698
""" Standard "encodings" Package Standard Python encoding modules are stored in this package directory. Codec modules must have names corresponding to normalized encoding names as defined in the normalize_encoding() function below, e.g. 'utf-8' must be implemented by the module 'utf_8.py'. Each codec module must export the following interface: * getregentry() -> codecs.CodecInfo object The getregentry() API must a CodecInfo object with encoder, decoder, incrementalencoder, incrementaldecoder, streamwriter and streamreader atttributes which adhere to the Python Codec Interface Standard. In addition, a module may optionally also define the following APIs which are then used by the package's codec search function: * getaliases() -> sequence of encoding name strings to use as aliases Alias names returned by getaliases() must be normalized encoding names as defined by normalize_encoding(). Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import codecs from encodings import aliases import __builtin__ _cache = {} _unknown = '--unknown--' _import_tail = ['*'] _norm_encoding_map = (' . ' '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' ' abcdefghijklmnopqrstuvwxyz ' ' ' ' ' ' ') _aliases = aliases.aliases class CodecRegistryError(LookupError, SystemError): pass def normalize_encoding(encoding): """ Normalize an encoding name. Normalization works as follows: all non-alphanumeric characters except the dot used for Python package names are collapsed and replaced with a single underscore, e.g. ' -;#' becomes '_'. Leading and trailing underscores are removed. Note that encoding names should be ASCII only; if they do use non-ASCII characters, these must be Latin-1 compatible. """ # Make sure we have an 8-bit string, because .translate() works # differently for Unicode strings. 
if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode): # Note that .encode('latin-1') does *not* use the codec # registry, so this call doesn't recurse. (See unicodeobject.c # PyUnicode_AsEncodedString() for details) encoding = encoding.encode('latin-1') return '_'.join(encoding.translate(_norm_encoding_map).split()) def search_function(encoding): # Cache lookup entry = _cache.get(encoding, _unknown) if entry is not _unknown: return entry # Import the module: # # First try to find an alias for the normalized encoding # name and lookup the module using the aliased name, then try to # lookup the module using the standard import scheme, i.e. first # try in the encodings package, then at top-level. # norm_encoding = normalize_encoding(encoding) aliased_encoding = _aliases.get(norm_encoding) or \ _aliases.get(norm_encoding.replace('.', '_')) if aliased_encoding is not None: modnames = [aliased_encoding, norm_encoding] else: modnames = [norm_encoding] for modname in modnames: if not modname or '.' in modname: continue try: # Import is absolute to prevent the possibly malicious import of a # module with side-effects that is not in the 'encodings' package. mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, level=0) except ImportError: pass else: break else: mod = None try: getregentry = mod.getregentry except AttributeError: # Not a codec module mod = None if mod is None: # Cache misses _cache[encoding] = None return None # Now ask the module for the registry entry entry = getregentry() if not isinstance(entry, codecs.CodecInfo): if not 4 <= len(entry) <= 7: raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) if not hasattr(entry[0], '__call__') or \ not hasattr(entry[1], '__call__') or \ (entry[2] is not None and not hasattr(entry[2], '__call__')) or \ (entry[3] is not None and not hasattr(entry[3], '__call__')) or \ (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \ (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')): raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) if len(entry)<7 or entry[6] is None: entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) entry = codecs.CodecInfo(*entry) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if alias not in _aliases: _aliases[alias] = modname # Return the registry entry return entry # Register the search_function in the Python codec registry codecs.register(search_function)
gpl-3.0
anvil8/django-admin-tools
admin_tools/dashboard/registry.py
14
1751
#coding: utf-8 class Registry(object): """ Registry for application dashboards. """ registry = {} def register(cls, klass, app_name): from admin_tools.dashboard.dashboards import Dashboard if not issubclass(klass, Dashboard): raise ValueError('%s is not an instance of Dashboard' % klass) if app_name in cls.registry: raise ValueError('A dashboard has already been registered for ' 'the application "%s"', app_name) cls.registry[app_name] = klass register = classmethod(register) def register(cls, *args, **kwargs): """ Register a custom dashboard into the global registry. """ Registry.register(cls, *args, **kwargs) def autodiscover(blacklist=[]): """ Automagically discover custom dashboards and menus for installed apps. Optionally you can pass a ``blacklist`` of apps that you don't want to provide their own app index dashboard. """ import imp from django.conf import settings from django.utils.importlib import import_module blacklist.append('admin_tools.dashboard') blacklist.append('admin_tools.menu') blacklist.append('admin_tools.theming') for app in settings.INSTALLED_APPS: # skip blacklisted apps if app in blacklist: continue # try to import the app try: app_path = import_module(app).__path__ except AttributeError: continue # try to find a app.dashboard module try: imp.find_module('dashboard', app_path) except ImportError: continue # looks like we found it so import it ! import_module('%s.dashboard' % app)
mit
pauljxtan/pystuff
pymahjong/yaku.py
1
5711
from tile import (NumberedTile, ManTile, PinTile, SouTile, WindTile, DragonTile, TERMINALS, HONOURS, WIND_TILES, DRAGONS, NUMBERS, SUITS, NUMBERED_TILE_TYPES, Sequence, Pair, Triplet) from utils import (flatten_groups, group_concealed_tiles_and_convert, get_sequences, get_triplets, get_quadruplets, get_plets, get_suit_counts_groups) #### Yaku def is_tanyao(groups): """ Returns True if the hand satisfies tanyao. """ tiles = flatten_groups(groups) return all([isinstance(tile, NumberedTile) and tile.number in range(2, 9) for tile in tiles]) def is_pinfu(groups, winning_tile): if get_plets(groups): return False return # TODO: get wait type def is_iipeikou(groups): sequences = get_sequences(groups) return (not is_ryanpeikou(groups) and any([sequences.count(sequence) == 2 for sequence in sequences])) def is_yakuhai(groups, prevailing_wind, player_wind): """ TODO: need to count multiple yakuhai """ plets = get_plets(groups) valid_honours = ((WindTile(prevailing_wind), WindTile(player_wind)) + HONOURS[4:]) return sum([plet.tiles[0] in valid_honours for plet in get_plets(groups)]) >= 1 def is_chanta(groups): return all([any([tile in group.tiles for tile in TERMINALS + HONOURS]) for group in groups]) def is_sanshoku_doujun(groups): sequences = get_sequences(groups) if len(sequences) < 3: return False suit_counts = get_suit_counts_groups(sequences) if 3 in suit_counts or 4 in suit_counts: return False heads = [sequence.tiles[0] for sequence in sequences] return # TODO def is_toitoi(groups): return len(get_plets(groups)) == 4 def is_sanankou(groups): plets = get_plets(groups) if len(plets) < 3: return False return sum([plet.closed for plet in plets]) == 3 def is_sanshoku_doukou(groups): plets = get_plets(groups) if len(plets) < 3: return False numbers = [plet.tiles[0].number for plet in plets] return any([numbers.count(number) == 3 for number in NUMBERS]) def is_sankantsu(groups): return len(get_quadruplets(groups)) == 3 def is_chitoi(tiles): """ Returns True if the hand 
satisfies chitoitsu. """ unique_tiles = set(tiles) return (len(unique_tiles) == 7 and all([tiles.count(tile) == 2 for tile in unique_tiles])) def is_honroutou(groups): return def is_shousangen(groups): if len(get_triplets(groups)) < 2: return False return (any([Pair(dragon) in groups for dragon in DRAGONS]) and sum([Triplet(dragon) in groups for dragon in DRAGONS]) == 2) def is_ittsu(groups): sequences = get_sequences(groups) if len(sequences) < 3: return False suit_counts = get_suit_counts_groups(sequences) if not (3 in suit_counts.values() or 4 in suit_counts.values()): return False suit = [suit for suit, count in suit_counts.iteritems() if count >= 3][0] return (Sequence(suit(1)) in groups and Sequence(suit(4)) in groups and Sequence(suit(7)) in groups) def is_ryanpeikou(groups): """ Note: Chitoitsu is implicitly excluded since it is checked as individual tiles instead of groups; see is_chitoi(). """ sequences = get_sequences(groups) if len(sequences) < 4: return False return sum([sequences.count(sequence) == 2 for sequence in set(sequences)]) == 2 def is_honitsu(groups): return not is_chinitsu(groups) and True # TODO def is_junchan(groups): return def is_chinitsu(groups): tiles = flatten_groups(groups) return any([all([isinstance(tile, numbered_tile) for tile in tiles]) for numbered_tile in NUMBERED_TILE_TYPES]) #### Yakuman def is_kokushi(tiles): """ Returns True if the hand satisfies kokushi musou. """ return set(tiles) == set(TERMINALS) | set(HONOURS) #### Info class YakuInfo(object): def __init__(self, name, han_closed, han_open, func): self.name = name self.han_closed = han_closed self.han_open = han_open self.func = func # TODO: separate by han values? 
YAKU_INFO = { # 1 han 'rch': YakuInfo("Riichi", 1, 0, None), 'ipp': YakuInfo("Ippatsu", 1, 0, None), 'smo': YakuInfo("Menzenchin tsumohou", 1, 0, None), 'pfu': YakuInfo("pinfu", 1, 0, is_pinfu), 'ipk': YakuInfo("Iipeikou", 1, 0, is_iipeikou), 'hai': YakuInfo("Haitei raoyue", 1, 1, None), 'hou': YakuInfo("Houtei raoyui", 1, 1, None), 'rin': YakuInfo("Rinshan kaihou", 1, 1, None), 'chk': YakuInfo("Chankan", 1, 1, None), 'tan': YakuInfo("Tanyao", 1, 1, is_tanyao), 'yak': YakuInfo("Yakuhai", 1, 1, is_yakuhai), # 2 han 'dri': YakuInfo("Double riichi", 2, 0, None), 'cha': YakuInfo("Honchantaiyaochuu", 2, 1, is_chanta), 'sdj': YakuInfo("Sanshoku doujun", 2, 1, is_sanshoku_doujun), 'itt': YakuInfo("Ikkitsuukan", 2, 1, is_ittsu), 'toi': YakuInfo("Toitoihou", 2, 2, is_toitoi), 'sna': YakuInfo("Sanankou", 2, 2, is_sanankou), 'sdo': YakuInfo("Sanshoku doukou", 2, 2, is_sanshoku_doukou), 'snk': YakuInfo("Sankantsu", 2, 2, is_sankantsu), 'chi': YakuInfo("Chitoitsu", 2, 0, is_chitoi), 'hro': YakuInfo("Honroutou", 2, 2, is_honroutou), 'ssg': YakuInfo("Shousangen", 2, 2, is_shousangen), # 3 han 'hon': YakuInfo("Honitsu", 3, 2, is_honitsu), 'jun': YakuInfo("Junchantaiyaochuu", 3, 2, is_junchan), 'rpk': YakuInfo("Ryanpeikou", 3, 0, is_ryanpeikou), # 6 han 'chn': YakuInfo("Chinitsu", 6, 5, is_chinitsu) } YAKUMAN_INFO = {}
mit
PhilLidar-DAD/geonode
geonode/base/templatetags/base_tags.py
1
5332
from django import template from agon_ratings.models import Rating from django.contrib.contenttypes.models import ContentType from django.contrib.auth import get_user_model from django.db.models import Count from guardian.shortcuts import get_objects_for_user #from geonode import settings from django.conf import settings from geonode.layers.models import Layer from geonode.maps.models import Map from geonode.documents.models import Document from geonode.groups.models import GroupProfile import urllib2 from urllib2 import HTTPError import json from django.core.urlresolvers import resolve from django.db.models import Q from geoserver.catalog import Catalog import geonode.settings as settings register = template.Library() @register.inclusion_tag('phil-ext.html',takes_context=True) def get_public_location(context): public_location = str(settings.OGC_SERVER['default']['PUBLIC_LOCATION']) return public_location @register.assignment_tag def get_orthophotos(takes_context=True): orthophotos = Layer.objects.get(name='orthophotos_resampled') return str(orthophotos.typename) @register.inclusion_tag('index.html',takes_context=True) def get_philgrid(context): cat = Catalog(settings.OGC_SERVER['default']['LOCATION'] + 'rest', username=settings.OGC_SERVER['default']['USER'], password=settings.OGC_SERVER['default']['PASSWORD']) philgrid = Layer.objects.get(name__icontains="philgrid") # resource = philgrid.resource gs_layer = cat.get_layer(philgrid.name) resource = gs_layer.resource return resource @register.assignment_tag def get_fhm_count(takes_context=True): try: visit_url = 'https://lipad-fmc.dream.upd.edu.ph/api/layers/' #?keyword__contains=hazard' response = urllib2.urlopen(visit_url) data = json.loads(response.read()) fhm_count = data['meta']['total_count'] except: fhm_count = "N/A" return fhm_count @register.assignment_tag def get_resourceLayers_count(takes_context=True): urls_to_visit = [links for links in settings.LIPAD_INSTANCES if links != 
'https://lipad-fmc.dream.upd.edu.ph/'] rl_count = Layer.objects.filter(keywords__name__icontains="phillidar2").count() for visit_url in urls_to_visit: try: response = urllib2.urlopen(visit_url+'api/total_count', timeout = 1) data = json.loads(response.read()) rl_count += data['total_count'] except: rl_count += 0 return rl_count @register.assignment_tag def get_fhm_fmc_url(takes_context=True): fhm = settings.LIPAD_FMC_FHM_URL return fhm @register.assignment_tag def get_public_location(takes_context=True): pl = settings.OGC_SERVER['default']['PUBLIC_LOCATION'] return pl @register.assignment_tag def num_ratings(obj): ct = ContentType.objects.get_for_model(obj) return len(Rating.objects.filter(object_id=obj.pk, content_type=ct)) @register.assignment_tag(takes_context=True) def facets(context): request = context['request'] title_filter = request.GET.get('title__icontains', '') facet_type = context['facet_type'] if 'facet_type' in context else 'all' if not settings.SKIP_PERMS_FILTER: authorized = get_objects_for_user( request.user, 'base.view_resourcebase').values('id') if facet_type == 'documents': documents = Document.objects.filter(title__icontains=title_filter) if settings.RESOURCE_PUBLISHING: documents = documents.filter(is_published=True) if not settings.SKIP_PERMS_FILTER: documents = documents.filter(id__in=authorized) counts = documents.values('doc_type').annotate(count=Count('doc_type')) facets = dict([(count['doc_type'], count['count']) for count in counts]) return facets else: layers = Layer.objects.filter(title__icontains=title_filter) if settings.RESOURCE_PUBLISHING: layers = layers.filter(is_published=True) if not settings.SKIP_PERMS_FILTER: layers = layers.filter(id__in=authorized) counts = layers.values('storeType').annotate(count=Count('storeType')) count_dict = dict([(count['storeType'], count['count']) for count in counts]) facets = { 'raster': count_dict.get('coverageStore', 0), 'vector': count_dict.get('dataStore', 0), 'remote': 
count_dict.get('remoteStore', 0), } # Break early if only_layers is set. if facet_type == 'layers': return facets maps = Map.objects.filter(title__icontains=title_filter) documents = Document.objects.filter(title__icontains=title_filter) if not settings.SKIP_PERMS_FILTER: maps = maps.filter(id__in=authorized) documents = documents.filter(id__in=authorized) facets['map'] = maps.count() facets['document'] = documents.count() if facet_type == 'home': facets['user'] = get_user_model().objects.exclude( username='AnonymousUser').count() facets['group'] = GroupProfile.objects.exclude( access="private").count() facets['layer'] = facets['raster'] + \ facets['vector'] + facets['remote'] return facets
gpl-3.0
jherico/qtwebkit
Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
118
4454
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from datetime import datetime import unittest2 as unittest from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.thirdparty.mock import Mock from webkitpy.tool.bot.feeders import * from webkitpy.tool.mocktool import MockTool class FeedersTest(unittest.TestCase): def test_commit_queue_feeder(self): feeder = CommitQueueFeeder(MockTool()) expected_logs = """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com) Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com) MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\nnon-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py. - If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags. - If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.' 
MOCK: update_work_items: commit-queue [10005, 10000] Feeding commit-queue items [10005, 10000] """ OutputCapture().assert_outputs(self, feeder.feed, expected_logs=expected_logs) def _mock_attachment(self, is_rollout, attach_date): attachment = Mock() attachment.is_rollout = lambda: is_rollout attachment.attach_date = lambda: attach_date return attachment def test_patch_cmp(self): long_ago_date = datetime(1900, 1, 21) recent_date = datetime(2010, 1, 21) attachment1 = self._mock_attachment(is_rollout=False, attach_date=recent_date) attachment2 = self._mock_attachment(is_rollout=False, attach_date=long_ago_date) attachment3 = self._mock_attachment(is_rollout=True, attach_date=recent_date) attachment4 = self._mock_attachment(is_rollout=True, attach_date=long_ago_date) attachments = [attachment1, attachment2, attachment3, attachment4] expected_sort = [attachment4, attachment3, attachment2, attachment1] queue = CommitQueueFeeder(MockTool()) attachments.sort(queue._patch_cmp) self.assertEqual(attachments, expected_sort) def test_patches_with_acceptable_review_flag(self): class MockPatch(object): def __init__(self, patch_id, review): self.id = patch_id self.review = lambda: review feeder = CommitQueueFeeder(MockTool()) patches = [MockPatch(1, None), MockPatch(2, '-'), MockPatch(3, "+")] self.assertEqual([patch.id for patch in feeder._patches_with_acceptable_review_flag(patches)], [1, 3])
lgpl-3.0
sursum/buckanjaren
buckanjaren/lib/python3.5/site-packages/pip/_vendor/webencodings/labels.py
512
8979
""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { 'unicode-1-1-utf-8': 'utf-8', 'utf-8': 'utf-8', 'utf8': 'utf-8', '866': 'ibm866', 'cp866': 'ibm866', 'csibm866': 'ibm866', 'ibm866': 'ibm866', 'csisolatin2': 'iso-8859-2', 'iso-8859-2': 'iso-8859-2', 'iso-ir-101': 'iso-8859-2', 'iso8859-2': 'iso-8859-2', 'iso88592': 'iso-8859-2', 'iso_8859-2': 'iso-8859-2', 'iso_8859-2:1987': 'iso-8859-2', 'l2': 'iso-8859-2', 'latin2': 'iso-8859-2', 'csisolatin3': 'iso-8859-3', 'iso-8859-3': 'iso-8859-3', 'iso-ir-109': 'iso-8859-3', 'iso8859-3': 'iso-8859-3', 'iso88593': 'iso-8859-3', 'iso_8859-3': 'iso-8859-3', 'iso_8859-3:1988': 'iso-8859-3', 'l3': 'iso-8859-3', 'latin3': 'iso-8859-3', 'csisolatin4': 'iso-8859-4', 'iso-8859-4': 'iso-8859-4', 'iso-ir-110': 'iso-8859-4', 'iso8859-4': 'iso-8859-4', 'iso88594': 'iso-8859-4', 'iso_8859-4': 'iso-8859-4', 'iso_8859-4:1988': 'iso-8859-4', 'l4': 'iso-8859-4', 'latin4': 'iso-8859-4', 'csisolatincyrillic': 'iso-8859-5', 'cyrillic': 'iso-8859-5', 'iso-8859-5': 'iso-8859-5', 'iso-ir-144': 'iso-8859-5', 'iso8859-5': 'iso-8859-5', 'iso88595': 'iso-8859-5', 'iso_8859-5': 'iso-8859-5', 'iso_8859-5:1988': 'iso-8859-5', 'arabic': 'iso-8859-6', 'asmo-708': 'iso-8859-6', 'csiso88596e': 'iso-8859-6', 'csiso88596i': 'iso-8859-6', 'csisolatinarabic': 'iso-8859-6', 'ecma-114': 'iso-8859-6', 'iso-8859-6': 'iso-8859-6', 'iso-8859-6-e': 'iso-8859-6', 'iso-8859-6-i': 'iso-8859-6', 'iso-ir-127': 'iso-8859-6', 'iso8859-6': 'iso-8859-6', 'iso88596': 'iso-8859-6', 'iso_8859-6': 'iso-8859-6', 'iso_8859-6:1987': 'iso-8859-6', 'csisolatingreek': 'iso-8859-7', 'ecma-118': 'iso-8859-7', 'elot_928': 'iso-8859-7', 'greek': 'iso-8859-7', 'greek8': 'iso-8859-7', 'iso-8859-7': 'iso-8859-7', 'iso-ir-126': 'iso-8859-7', 'iso8859-7': 'iso-8859-7', 'iso88597': 'iso-8859-7', 
'iso_8859-7': 'iso-8859-7', 'iso_8859-7:1987': 'iso-8859-7', 'sun_eu_greek': 'iso-8859-7', 'csiso88598e': 'iso-8859-8', 'csisolatinhebrew': 'iso-8859-8', 'hebrew': 'iso-8859-8', 'iso-8859-8': 'iso-8859-8', 'iso-8859-8-e': 'iso-8859-8', 'iso-ir-138': 'iso-8859-8', 'iso8859-8': 'iso-8859-8', 'iso88598': 'iso-8859-8', 'iso_8859-8': 'iso-8859-8', 'iso_8859-8:1988': 'iso-8859-8', 'visual': 'iso-8859-8', 'csiso88598i': 'iso-8859-8-i', 'iso-8859-8-i': 'iso-8859-8-i', 'logical': 'iso-8859-8-i', 'csisolatin6': 'iso-8859-10', 'iso-8859-10': 'iso-8859-10', 'iso-ir-157': 'iso-8859-10', 'iso8859-10': 'iso-8859-10', 'iso885910': 'iso-8859-10', 'l6': 'iso-8859-10', 'latin6': 'iso-8859-10', 'iso-8859-13': 'iso-8859-13', 'iso8859-13': 'iso-8859-13', 'iso885913': 'iso-8859-13', 'iso-8859-14': 'iso-8859-14', 'iso8859-14': 'iso-8859-14', 'iso885914': 'iso-8859-14', 'csisolatin9': 'iso-8859-15', 'iso-8859-15': 'iso-8859-15', 'iso8859-15': 'iso-8859-15', 'iso885915': 'iso-8859-15', 'iso_8859-15': 'iso-8859-15', 'l9': 'iso-8859-15', 'iso-8859-16': 'iso-8859-16', 'cskoi8r': 'koi8-r', 'koi': 'koi8-r', 'koi8': 'koi8-r', 'koi8-r': 'koi8-r', 'koi8_r': 'koi8-r', 'koi8-u': 'koi8-u', 'csmacintosh': 'macintosh', 'mac': 'macintosh', 'macintosh': 'macintosh', 'x-mac-roman': 'macintosh', 'dos-874': 'windows-874', 'iso-8859-11': 'windows-874', 'iso8859-11': 'windows-874', 'iso885911': 'windows-874', 'tis-620': 'windows-874', 'windows-874': 'windows-874', 'cp1250': 'windows-1250', 'windows-1250': 'windows-1250', 'x-cp1250': 'windows-1250', 'cp1251': 'windows-1251', 'windows-1251': 'windows-1251', 'x-cp1251': 'windows-1251', 'ansi_x3.4-1968': 'windows-1252', 'ascii': 'windows-1252', 'cp1252': 'windows-1252', 'cp819': 'windows-1252', 'csisolatin1': 'windows-1252', 'ibm819': 'windows-1252', 'iso-8859-1': 'windows-1252', 'iso-ir-100': 'windows-1252', 'iso8859-1': 'windows-1252', 'iso88591': 'windows-1252', 'iso_8859-1': 'windows-1252', 'iso_8859-1:1987': 'windows-1252', 'l1': 'windows-1252', 'latin1': 
'windows-1252', 'us-ascii': 'windows-1252', 'windows-1252': 'windows-1252', 'x-cp1252': 'windows-1252', 'cp1253': 'windows-1253', 'windows-1253': 'windows-1253', 'x-cp1253': 'windows-1253', 'cp1254': 'windows-1254', 'csisolatin5': 'windows-1254', 'iso-8859-9': 'windows-1254', 'iso-ir-148': 'windows-1254', 'iso8859-9': 'windows-1254', 'iso88599': 'windows-1254', 'iso_8859-9': 'windows-1254', 'iso_8859-9:1989': 'windows-1254', 'l5': 'windows-1254', 'latin5': 'windows-1254', 'windows-1254': 'windows-1254', 'x-cp1254': 'windows-1254', 'cp1255': 'windows-1255', 'windows-1255': 'windows-1255', 'x-cp1255': 'windows-1255', 'cp1256': 'windows-1256', 'windows-1256': 'windows-1256', 'x-cp1256': 'windows-1256', 'cp1257': 'windows-1257', 'windows-1257': 'windows-1257', 'x-cp1257': 'windows-1257', 'cp1258': 'windows-1258', 'windows-1258': 'windows-1258', 'x-cp1258': 'windows-1258', 'x-mac-cyrillic': 'x-mac-cyrillic', 'x-mac-ukrainian': 'x-mac-cyrillic', 'chinese': 'gbk', 'csgb2312': 'gbk', 'csiso58gb231280': 'gbk', 'gb2312': 'gbk', 'gb_2312': 'gbk', 'gb_2312-80': 'gbk', 'gbk': 'gbk', 'iso-ir-58': 'gbk', 'x-gbk': 'gbk', 'gb18030': 'gb18030', 'hz-gb-2312': 'hz-gb-2312', 'big5': 'big5', 'big5-hkscs': 'big5', 'cn-big5': 'big5', 'csbig5': 'big5', 'x-x-big5': 'big5', 'cseucpkdfmtjapanese': 'euc-jp', 'euc-jp': 'euc-jp', 'x-euc-jp': 'euc-jp', 'csiso2022jp': 'iso-2022-jp', 'iso-2022-jp': 'iso-2022-jp', 'csshiftjis': 'shift_jis', 'ms_kanji': 'shift_jis', 'shift-jis': 'shift_jis', 'shift_jis': 'shift_jis', 'sjis': 'shift_jis', 'windows-31j': 'shift_jis', 'x-sjis': 'shift_jis', 'cseuckr': 'euc-kr', 'csksc56011987': 'euc-kr', 'euc-kr': 'euc-kr', 'iso-ir-149': 'euc-kr', 'korean': 'euc-kr', 'ks_c_5601-1987': 'euc-kr', 'ks_c_5601-1989': 'euc-kr', 'ksc5601': 'euc-kr', 'ksc_5601': 'euc-kr', 'windows-949': 'euc-kr', 'csiso2022kr': 'iso-2022-kr', 'iso-2022-kr': 'iso-2022-kr', 'utf-16be': 'utf-16be', 'utf-16': 'utf-16le', 'utf-16le': 'utf-16le', 'x-user-defined': 'x-user-defined', }
mit
SebDieBln/QGIS
python/plugins/processing/modeler/MultilineTextPanel.py
24
2656
# -*- coding: utf-8 -*-

"""
***************************************************************************
    MultilineTextPanel.py
    ---------------------
    Date                 : January 2013
    Copyright            : (C) 2013 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive

__revision__ = '$Format:%H$'

from PyQt4 import QtGui


class MultilineTextPanel(QtGui.QWidget):
    """A combo box of predefined options plus a free-form text area.

    Index 0 of the combo is a "[Use text below]" placeholder; options
    supplied by the caller occupy indices 1..n.
    """

    USE_TEXT = 0

    def __init__(self, options, parent=None):
        super(MultilineTextPanel, self).__init__(parent)
        self.options = options
        self.verticalLayout = QtGui.QVBoxLayout(self)
        self.verticalLayout.setSpacing(2)
        self.verticalLayout.setMargin(0)
        self.combo = QtGui.QComboBox()
        self.combo.addItem(self.tr('[Use text below]'))
        for option in options:
            # option is a (label, data) pair
            self.combo.addItem(option[0], option[1])
        self.combo.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                 QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addWidget(self.combo)
        self.textBox = QtGui.QPlainTextEdit()
        self.verticalLayout.addWidget(self.textBox)
        self.setLayout(self.verticalLayout)

    def setText(self, text):
        self.textBox.setPlainText(text)

    def getOption(self):
        """Return the current combo index (0 means USE_TEXT)."""
        return self.combo.currentIndex()

    def getValue(self):
        """Return the free text if the placeholder is selected, otherwise
        the data associated with the selected predefined option."""
        if self.combo.currentIndex() == 0:
            return unicode(self.textBox.toPlainText())
        else:
            return self.combo.itemData(self.combo.currentIndex())

    def setValue(self, value):
        """Select the option whose data equals ``value``, or fall back to
        the free-text placeholder and show ``value`` as text."""
        items = [self.combo.itemData(i) for i in range(1, self.combo.count())]
        for idx, item in enumerate(items):
            if item == value:
                # Bug fix: ``items`` starts at combo index 1, so the list
                # index must be shifted by one; the previous code passed
                # ``idx`` directly and selected the wrong entry.
                self.combo.setCurrentIndex(idx + 1)
                return
        self.combo.setCurrentIndex(0)
        if value:
            self.textBox.setPlainText(value)
gpl-2.0
ak2703/edx-platform
common/djangoapps/track/tests/test_shim.py
111
4737
"""Ensure emitted events contain the fields legacy processors expect to find.""" from mock import sentinel from django.test.utils import override_settings from openedx.core.lib.tests.assertions.events import assert_events_equal from track.tests import EventTrackingTestCase, FROZEN_TIME LEGACY_SHIM_PROCESSOR = [ { 'ENGINE': 'track.shim.LegacyFieldMappingProcessor' } ] GOOGLE_ANALYTICS_PROCESSOR = [ { 'ENGINE': 'track.shim.GoogleAnalyticsProcessor' } ] @override_settings( EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR, ) class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase): """Ensure emitted events contain the fields legacy processors expect to find.""" def test_event_field_mapping(self): data = {sentinel.key: sentinel.value} context = { 'accept_language': sentinel.accept_language, 'referer': sentinel.referer, 'username': sentinel.username, 'session': sentinel.session, 'ip': sentinel.ip, 'host': sentinel.host, 'agent': sentinel.agent, 'path': sentinel.path, 'user_id': sentinel.user_id, 'course_id': sentinel.course_id, 'org_id': sentinel.org_id, 'client_id': sentinel.client_id, } with self.tracker.context('test', context): self.tracker.emit(sentinel.name, data) emitted_event = self.get_event() expected_event = { 'accept_language': sentinel.accept_language, 'referer': sentinel.referer, 'event_type': sentinel.name, 'name': sentinel.name, 'context': { 'user_id': sentinel.user_id, 'course_id': sentinel.course_id, 'org_id': sentinel.org_id, 'path': sentinel.path, }, 'event': data, 'username': sentinel.username, 'event_source': 'server', 'time': FROZEN_TIME, 'agent': sentinel.agent, 'host': sentinel.host, 'ip': sentinel.ip, 'page': None, 'session': sentinel.session, } assert_events_equal(expected_event, emitted_event) def test_missing_fields(self): self.tracker.emit(sentinel.name) emitted_event = self.get_event() expected_event = { 'accept_language': '', 'referer': '', 'event_type': sentinel.name, 'name': sentinel.name, 'context': {}, 'event': {}, 'username': 
'', 'event_source': 'server', 'time': FROZEN_TIME, 'agent': '', 'host': '', 'ip': '', 'page': None, 'session': '', } assert_events_equal(expected_event, emitted_event) @override_settings( EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR, ) class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase): """Ensure emitted events contain the fields necessary for Google Analytics.""" def test_event_fields(self): """ Test that course_id is added as the label if present, and nonInteraction is set. """ data = {sentinel.key: sentinel.value} context = { 'path': sentinel.path, 'user_id': sentinel.user_id, 'course_id': sentinel.course_id, 'org_id': sentinel.org_id, 'client_id': sentinel.client_id, } with self.tracker.context('test', context): self.tracker.emit(sentinel.name, data) emitted_event = self.get_event() expected_event = { 'context': context, 'data': data, 'label': sentinel.course_id, 'name': sentinel.name, 'nonInteraction': 1, 'timestamp': FROZEN_TIME, } assert_events_equal(expected_event, emitted_event) def test_no_course_id(self): """ Test that a label is not added if course_id is not specified, but nonInteraction is still set. """ data = {sentinel.key: sentinel.value} context = { 'path': sentinel.path, 'user_id': sentinel.user_id, 'client_id': sentinel.client_id, } with self.tracker.context('test', context): self.tracker.emit(sentinel.name, data) emitted_event = self.get_event() expected_event = { 'context': context, 'data': data, 'name': sentinel.name, 'nonInteraction': 1, 'timestamp': FROZEN_TIME, } assert_events_equal(expected_event, emitted_event)
agpl-3.0
Bogh/django-oscar
src/oscar/apps/payment/bankcards.py
22
2684
# six's ``map`` is lazy on Python 2 as well; fall back to the builtin when
# django/six is unavailable so the module also works standalone (on Python 3
# the builtin map is already lazy and behaves identically).
try:
    from django.utils.six.moves import map
except ImportError:  # pragma: no cover
    pass

VISA, VISA_ELECTRON, MASTERCARD, AMEX, MAESTRO, DISCOVER = (
    'Visa', 'Visa Electron', 'Mastercard', 'American Express', 'Maestro',
    'Discover')
DINERS_CLUB = 'Diners Club'
CHINA_UNIONPAY = 'China UnionPay'
JCB = 'JCB'
LASER = 'Laser'
SOLO = 'Solo'
SWITCH = 'Switch'

# List of (type, lengths, prefixes) tuples
# See http://en.wikipedia.org/wiki/Bank_card_number
CARD_TYPES = [
    (AMEX, (15,), ('34', '37')),
    (CHINA_UNIONPAY, (16, 17, 18, 19), ('62', '88')),
    (DINERS_CLUB, (14,), ('300', '301', '302', '303', '304', '305')),
    (DINERS_CLUB, (14,), ('36',)),
    (DISCOVER, (16,),
     list(map(str, list(range(622126, 622926)))) +
     list(map(str, list(range(644, 650)))) + ['6011', '65']),
    # Bug fix: this entry previously stored a bare ``map`` iterator.  A lazy
    # map object is exhausted after its first traversal, so JCB cards were
    # only ever matched once per process; materialize it as a list like every
    # other entry.
    (JCB, (16,), list(map(str, list(range(3528, 3590))))),
    (LASER, list(range(16, 20)), ('6304', '6706', '6771', '6709')),
    (MAESTRO, list(range(12, 20)), ('5018', '5020', '5038', '5893', '6304',
                                    '6759', '6761', '6762', '6763', '0604')),
    (MASTERCARD, (16,), list(map(str, list(range(51, 56))))),
    # Diners Club cards match the same pattern as Mastercard. They are treated
    # as Mastercard normally so we put the mastercard pattern first.
    (DINERS_CLUB, (16,), ('54', '55')),
    (SOLO, list(range(16, 20)), ('6334', '6767')),
    (SWITCH, list(range(16, 20)), ('4903', '4905', '4911', '4936', '564182',
                                   '633110', '6333', '6759')),
    (VISA, (13, 16), ('4',)),
    (VISA_ELECTRON, (16,), ('4026', '417500', '4405', '4508', '4844', '4913',
                            '4917')),
]


def is_amex(number):
    """Return True if *number* is an American Express card number."""
    return bankcard_type(number) == AMEX


def bankcard_type(card_number):
    """
    Return the type of a bankcard based on its card_number.

    Returns None is the card_number is not recognised.
    """
    def matches(card_number, lengths, prefixes):
        # A card matches when its length is allowed and it starts with any
        # of the prefixes; str.startswith accepts a tuple of candidates.
        if len(card_number) not in lengths:
            return False
        return card_number.startswith(tuple(prefixes))

    # First matching entry wins, so order in CARD_TYPES is significant.
    for card_type, lengths, prefixes in CARD_TYPES:
        if matches(card_number, lengths, prefixes):
            return card_type


def luhn(card_number):
    """
    Test whether a bankcard number passes the Luhn algorithm.
    """
    card_number = str(card_number)
    total = 0
    num_digits = len(card_number)
    odd_even = num_digits & 1
    for i in range(0, num_digits):
        digit = int(card_number[i])
        # Double every second digit (counting from the right); if the
        # doubled value has two digits, sum them (equivalent to -9).
        if not ((i & 1) ^ odd_even):
            digit = digit * 2
            if digit > 9:
                digit = digit - 9
        total = total + digit
    return (total % 10) == 0
bsd-3-clause
rongdede/shadowsocks
shadowsocks/udprelay.py
924
11154
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # SOCKS5 UDP Request # +----+------+------+----------+----------+----------+ # |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA | # +----+------+------+----------+----------+----------+ # | 2 | 1 | 1 | Variable | 2 | Variable | # +----+------+------+----------+----------+----------+ # SOCKS5 UDP Response # +----+------+------+----------+----------+----------+ # |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA | # +----+------+------+----------+----------+----------+ # | 2 | 1 | 1 | Variable | 2 | Variable | # +----+------+------+----------+----------+----------+ # shadowsocks UDP Request (before encrypted) # +------+----------+----------+----------+ # | ATYP | DST.ADDR | DST.PORT | DATA | # +------+----------+----------+----------+ # | 1 | Variable | 2 | Variable | # +------+----------+----------+----------+ # shadowsocks UDP Response (before encrypted) # +------+----------+----------+----------+ # | ATYP | DST.ADDR | DST.PORT | DATA | # +------+----------+----------+----------+ # | 1 | Variable | 2 | Variable | # +------+----------+----------+----------+ # shadowsocks UDP Request and Response (after encrypted) # +-------+--------------+ # | IV | PAYLOAD | # +-------+--------------+ # | Fixed | Variable | # +-------+--------------+ # HOW TO NAME THINGS # ------------------ # `dest` means destination server, which is from DST fields in the SOCKS5 # request # `local` 
means local server of shadowsocks # `remote` means remote server of shadowsocks # `client` means UDP clients that connects to other servers # `server` means the UDP server that handles user requests from __future__ import absolute_import, division, print_function, \ with_statement import socket import logging import struct import errno import random from shadowsocks import encrypt, eventloop, lru_cache, common, shell from shadowsocks.common import parse_header, pack_addr BUF_SIZE = 65536 def client_key(source_addr, server_af): # notice this is server af, not dest af return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af) class UDPRelay(object): def __init__(self, config, dns_resolver, is_local, stat_callback=None): self._config = config if is_local: self._listen_addr = config['local_address'] self._listen_port = config['local_port'] self._remote_addr = config['server'] self._remote_port = config['server_port'] else: self._listen_addr = config['server'] self._listen_port = config['server_port'] self._remote_addr = None self._remote_port = None self._dns_resolver = dns_resolver self._password = common.to_bytes(config['password']) self._method = config['method'] self._timeout = config['timeout'] self._is_local = is_local self._cache = lru_cache.LRUCache(timeout=config['timeout'], close_callback=self._close_client) self._client_fd_to_server_addr = \ lru_cache.LRUCache(timeout=config['timeout']) self._dns_cache = lru_cache.LRUCache(timeout=300) self._eventloop = None self._closed = False self._sockets = set() if 'forbidden_ip' in config: self._forbidden_iplist = config['forbidden_ip'] else: self._forbidden_iplist = None addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0, socket.SOCK_DGRAM, socket.SOL_UDP) if len(addrs) == 0: raise Exception("can't get addrinfo for %s:%d" % (self._listen_addr, self._listen_port)) af, socktype, proto, canonname, sa = addrs[0] server_socket = socket.socket(af, socktype, proto) server_socket.bind((self._listen_addr, 
self._listen_port)) server_socket.setblocking(False) self._server_socket = server_socket self._stat_callback = stat_callback def _get_a_server(self): server = self._config['server'] server_port = self._config['server_port'] if type(server_port) == list: server_port = random.choice(server_port) if type(server) == list: server = random.choice(server) logging.debug('chosen server: %s:%d', server, server_port) return server, server_port def _close_client(self, client): if hasattr(client, 'close'): self._sockets.remove(client.fileno()) self._eventloop.remove(client) client.close() else: # just an address pass def _handle_server(self): server = self._server_socket data, r_addr = server.recvfrom(BUF_SIZE) if not data: logging.debug('UDP handle_server: data is empty') if self._stat_callback: self._stat_callback(self._listen_port, len(data)) if self._is_local: frag = common.ord(data[2]) if frag != 0: logging.warn('drop a message since frag is not 0') return else: data = data[3:] else: data = encrypt.encrypt_all(self._password, self._method, 0, data) # decrypt data if not data: logging.debug('UDP handle_server: data is empty after decrypt') return header_result = parse_header(data) if header_result is None: return addrtype, dest_addr, dest_port, header_length = header_result if self._is_local: server_addr, server_port = self._get_a_server() else: server_addr, server_port = dest_addr, dest_port addrs = self._dns_cache.get(server_addr, None) if addrs is None: addrs = socket.getaddrinfo(server_addr, server_port, 0, socket.SOCK_DGRAM, socket.SOL_UDP) if not addrs: # drop return else: self._dns_cache[server_addr] = addrs af, socktype, proto, canonname, sa = addrs[0] key = client_key(r_addr, af) client = self._cache.get(key, None) if not client: # TODO async getaddrinfo if self._forbidden_iplist: if common.to_str(sa[0]) in self._forbidden_iplist: logging.debug('IP %s is in forbidden list, drop' % common.to_str(sa[0])) # drop return client = socket.socket(af, socktype, proto) 
client.setblocking(False) self._cache[key] = client self._client_fd_to_server_addr[client.fileno()] = r_addr self._sockets.add(client.fileno()) self._eventloop.add(client, eventloop.POLL_IN, self) if self._is_local: data = encrypt.encrypt_all(self._password, self._method, 1, data) if not data: return else: data = data[header_length:] if not data: return try: client.sendto(data, (server_addr, server_port)) except IOError as e: err = eventloop.errno_from_exception(e) if err in (errno.EINPROGRESS, errno.EAGAIN): pass else: shell.print_exception(e) def _handle_client(self, sock): data, r_addr = sock.recvfrom(BUF_SIZE) if not data: logging.debug('UDP handle_client: data is empty') return if self._stat_callback: self._stat_callback(self._listen_port, len(data)) if not self._is_local: addrlen = len(r_addr[0]) if addrlen > 255: # drop return data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data response = encrypt.encrypt_all(self._password, self._method, 1, data) if not response: return else: data = encrypt.encrypt_all(self._password, self._method, 0, data) if not data: return header_result = parse_header(data) if header_result is None: return # addrtype, dest_addr, dest_port, header_length = header_result response = b'\x00\x00\x00' + data client_addr = self._client_fd_to_server_addr.get(sock.fileno()) if client_addr: self._server_socket.sendto(response, client_addr) else: # this packet is from somewhere else we know # simply drop that packet pass def add_to_loop(self, loop): if self._eventloop: raise Exception('already add to loop') if self._closed: raise Exception('already closed') self._eventloop = loop server_socket = self._server_socket self._eventloop.add(server_socket, eventloop.POLL_IN | eventloop.POLL_ERR, self) loop.add_periodic(self.handle_periodic) def handle_event(self, sock, fd, event): if sock == self._server_socket: if event & eventloop.POLL_ERR: logging.error('UDP server_socket err') self._handle_server() elif sock and (fd in self._sockets): if 
event & eventloop.POLL_ERR: logging.error('UDP client_socket err') self._handle_client(sock) def handle_periodic(self): if self._closed: if self._server_socket: self._server_socket.close() self._server_socket = None for sock in self._sockets: sock.close() logging.info('closed UDP port %d', self._listen_port) self._cache.sweep() self._client_fd_to_server_addr.sweep() def close(self, next_tick=False): logging.debug('UDP close') self._closed = True if not next_tick: if self._eventloop: self._eventloop.remove_periodic(self.handle_periodic) self._eventloop.remove(self._server_socket) self._server_socket.close() for client in list(self._cache.values()): client.close()
apache-2.0
lostatc/retain-sync
zielen/commands/init.py
2
8108
"""A class for the 'init' command. Copyright © 2016-2018 Garrett Powell <garrett@gpowell.net> This file is part of zielen. zielen is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. zielen is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with zielen. If not, see <http://www.gnu.org/licenses/>. """ import os import re import time import shutil import atexit import textwrap from typing import Optional from zielen.paths import get_program_dir from zielen.exceptions import InputError from zielen.fstools import check_dir from zielen.filelogic import FilesManager from zielen.userdata import LocalSyncDir, RemoteSyncDir from zielen.profile import Profile from zielen.commandbase import Command, unlock class InitCommand(Command): """Run the "init" command. Attributes: profile_input: The "name" argument for the command. profile: The currently selected profile. exclude: The argument for the "--exclude" option. template: The argument for the "--template" option. add_remote: The "--add-remote" options was given. """ def __init__(self, profile_input: str, exclude=None, template=None, add_remote=False) -> None: super().__init__() self.profile_input = profile_input self.exclude = exclude self.template = template self.add_remote = add_remote @unlock def main(self) -> None: """Run the command. Raises: InputError: The command-line arguments were invalid. """ # Define cleanup functions. 
def cleanup_profile() -> None: """Remove the profile directory if empty.""" try: os.rmdir(self.profile.path) except OSError: pass def delete_profile() -> None: """Delete the profile directory.""" try: shutil.rmtree(self.profile.path) except FileNotFoundError: pass # Check that value of profile name is valid. if re.search(r"\s+", self.profile_input): raise InputError("profile name must not contain spaces") elif not re.search(r"^[\w-]+$", self.profile_input): raise InputError( "profile name must not contain special symbols") # Check the arguments of command-line options. if self.exclude: if not os.path.isfile(self.exclude): raise InputError( "argument for '--exclude' is not a valid file") if self.template: if not os.path.isfile(self.template): raise InputError( "argument for '--template' is not a valid file") atexit.register(cleanup_profile) self.profile = Profile(self.profile_input) try: self.profile.read() except FileNotFoundError: pass # Check if the profile has already been initialized. if self.profile.status == "initialized": raise InputError("this profile already exists") # Lock profile if not already locked. self.lock() # Check whether an interrupted initialization is being resumed. if self.profile.status == "partial": # Resume an interrupted initialization. print("Resuming initialization...\n") atexit.register(self.print_interrupt_msg) # The user doesn't have to specify the same command-line arguments # when they're resuming and initialization. self.add_remote = self.profile.add_remote self.local_dir = LocalSyncDir(self.profile.local_path) self.remote_dir = RemoteSyncDir(self.profile.remote_path) fm = FilesManager(self.local_dir, self.remote_dir, self.profile) else: # Start a new initialization. atexit.register(delete_profile) # Generate all files in the profile directory. 
init_options = { "add_remote": self.add_remote} self.profile.generate( init_options=init_options, exclude_path=self.exclude, template_path=self.template) # Check validity of local and remote directories. error_message = check_dir( self.profile.local_path, self.add_remote) if error_message: raise InputError("local directory {}".format(error_message)) error_message = self._verify_local_dir(self.profile.local_path) if error_message: raise InputError(error_message) error_message = check_dir( self.profile.remote_path, not self.add_remote) if error_message: raise InputError("remote directory {}".format(error_message)) self.local_dir = LocalSyncDir(self.profile.local_path) self.remote_dir = RemoteSyncDir(self.profile.remote_path) fm = FilesManager(self.local_dir, self.remote_dir, self.profile) # The profile is now partially initialized. If the # initialization is interrupted from this point, it can be # resumed. atexit.register(self.print_interrupt_msg) atexit.unregister(delete_profile) self.remote_dir.generate() # Copy files and/or create symlinks. if self.add_remote: fm.setup_from_remote() else: fm.setup_from_local() # Copy exclude pattern file to remote directory for use when remote dir # is shared. self.remote_dir.add_exclude_file(self.profile.exclude_path, self.profile.id) # The profile is now fully initialized. Update the profile. self.remote_dir.write() self.profile.status = "initialized" self.profile.last_sync = time.time() self.profile.last_adjust = time.time() self.profile.write() atexit.unregister(self.print_interrupt_msg) print(textwrap.dedent(""" Run the following commands to start the daemon: 'systemctl --user start zielen@{0}.service' 'systemctl --user enable zielen@{0}.service'""".format( self.profile.name))) def _verify_local_dir(self, dir_path: str) -> Optional[str]: """Verity that local directory path doesn't overlap other profiles. Also verify that the local directory path doesn't overlap with the program directory. 
Args: dir_path: The absolute path of the local directory. Returns: An error message if the local directory is invalid and None otherwise. """ common_path = os.path.commonpath([dir_path, get_program_dir()]) if common_path in [dir_path, get_program_dir()]: return "local directory must not contain zielen config files" overlap_profiles = [] for name, profile in self.profiles.items(): if profile is self.profile or not os.path.isfile(profile.cfg_path): continue profile.read() common_path = os.path.commonpath([profile.local_path, dir_path]) if common_path in [profile.local_path, dir_path]: overlap_profiles.append(name) if overlap_profiles: # Print a comma-separated list of conflicting profile names # after the error message. suffix = "s" if len(overlap_profiles) > 1 else "" return "local directory overlaps with the profile{0} {1}".format( suffix, ", ".join("'{}'".format(x) for x in overlap_profiles))
gpl-3.0
eamontoyaa/pyCSS
functions/defineslipcircle.py
1
6612
# Import modules import numpy as np from circleby2ptsradius import circleby2ptsradius from unitvector import unitvector from azimuthangle import azimuthangle ''' # Description. Define the values of the slip circle arc (i.e. arc center point, radius and initial and final angles ) by giving two extreme points and the circle radius. # External subfunction(s): circleby2ptsradius, azimuthangle, unitvector. # Input(s). Two dimensional vector of the coordinates that defines the slip at the slope toe (pointAtToeVec); Two dimensional vector of the coordinates that defines the slip at the slope crown (pointAtCrownVec); Arc radius (slipRadius). # Output(s). Boolean variable giving True if an arc is possible within the given input variables and the sense of a slip surface (existSlipCircleTrue). Normaly, any arc can satisfy two points and a radius definition; but there are less arcs can either attain the conditions to be an concave inferior arc to represent a slip surface. Dictionary type structure of the arc that defines the slip (slipArcSTR). 
The fields of the strucrure is as following: center: center of the slip arc; radius: radius of the slip arc; iniAngGrad: counter clockwise angle (in sexagesimal grades) from a reference unit vector [1 0] to the initial radius that defines the arc; endAngGrad: counter clockwise angle (in sexagesimal grades) from a reference unit vector [1 0] to the final radius that defines the arc; deepDist: deepest distance from toe--point horizontal reference where the arc passes; leftDist: most left distance from toe--point vertical reference where the arc passes; # Example1: When putting the following inputs: pointAtToeVec = np.array([40, 12]) pointAtCrownVec = np.array([4.347, 24]) slipRadius = 34.4848 The outputs then are: True, {'center': array([ 31.39356183, 45.39357203]), 'deepDist': 10.908772031832854, 'endAngGrad': 284.45218279917071, 'iniAngGrad': 218.34366020340661, 'leftDist': -3.0912381712059478, 'radius': 34.4848} --- existSlipCircleTrue, slipArcSTR = defineslipcircle(pointAtToeVec, \ pointAtCrownVec, slipRadius) ''' def defineslipcircle(pointAtToeVec, pointAtCrownVec, slipRadius): ## Finding out the two possible centers within the points centerVec1, centerVec2 = circleby2ptsradius(pointAtToeVec, \ pointAtCrownVec, slipRadius) ## Verifying if exist a circle existSlipCircleTrue = True error = False for i in list(range(len(centerVec1))): if centerVec1[i] == 'NaN' or centerVec2[i] == 'NaN': existSlipCircleTrue = False break ## Doing the math if existSlipCircleTrue == False: error = True else: ## Selecting the appropriate center vector for the slip circle. # The line unit vector. diffVec = (pointAtCrownVec-pointAtToeVec) diffUnitVec = unitvector(diffVec) # The line equation (analitical eq.). lineSlope = diffUnitVec[1]/diffUnitVec[0] intercept = pointAtToeVec[1]-lineSlope*pointAtToeVec[0] # Verifying. 
y1 = intercept+lineSlope*centerVec1[0] y2 = intercept+lineSlope*centerVec2[0] if centerVec1[1] >= y1 and centerVec2[1] < y2: slipCenterVec = centerVec1 elif centerVec1[1] < y1 and centerVec2[1] >= y2: slipCenterVec = centerVec2 else: print ('error: there is no slip at that points') error = True if error == True: ##assigning values slipCenterVec = np.array(['NaN', 'NaN']) slipRadius = 'NaN' initialAngleGrad = 'NaN' endAngleGrad = 'NaN' deepestVertDepth = 'NaN' mostleftHorzDist = 'NaN' else: ## Finding out the sector initial and final angles. # Toe vector. toeCenter2PtVec = pointAtToeVec-slipCenterVec toeCenter2PtUnitVec = unitvector(toeCenter2PtVec) toeRadiusAngleRad = azimuthangle(toeCenter2PtUnitVec) endAngleGrad = toeRadiusAngleRad*180/np.pi # Crown vector. crownCenter2PtVec = pointAtCrownVec-slipCenterVec; crownCenter2PtUnitVec = unitvector(crownCenter2PtVec) crownRadiusAngleRad = azimuthangle(crownCenter2PtUnitVec) initialAngleGrad = crownRadiusAngleRad*180/np.pi # Extreme slip values. # Deepest vertical point at the slip circle deepestVertDepth = slipCenterVec[1]-slipRadius # Nost Left horizontal distance at the slio circle mostleftHorzDist = slipCenterVec[0]-slipRadius ## Creating the structure slipArcSTR = {'center': slipCenterVec, 'radius': slipRadius, 'iniAngGrad': initialAngleGrad, 'endAngGrad': endAngleGrad, 'deepDist': deepestVertDepth, 'leftDist': mostleftHorzDist} return existSlipCircleTrue, slipArcSTR ''' BSD 2 license. Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O. Suarez-Burgoa and Exneyder Andrés Montoya Araque. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
bsd-2-clause
mandeepdhami/neutron
neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py
4
5313
# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.i18n import _LE, _LW from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc LOG = logging.getLogger(__name__) class PciDeviceIPWrapper(ip_lib.IPWrapper): """Wrapper class for ip link commands. wrapper for getting/setting pci device details using ip link... """ VF_PATTERN = r"^vf\s+(?P<vf_index>\d+)\s+" MAC_PATTERN = r"MAC\s+(?P<mac>[a-fA-F0-9:]+)," STATE_PATTERN = r"\s+link-state\s+(?P<state>\w+)" ANY_PATTERN = ".*," VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT) class LinkState(object): ENABLE = "enable" DISABLE = "disable" def __init__(self, dev_name): super(PciDeviceIPWrapper, self).__init__() self.dev_name = dev_name def get_assigned_macs(self, vf_list): """Get assigned mac addresses for vf list. 
@param vf_list: list of vf indexes @return: list of assigned mac addresses """ try: out = self._execute('', "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show(vf_list, out) vf_details_list = [] if vf_lines: for vf_line in vf_lines: vf_details = self._parse_vf_link_show(vf_line) if vf_details: vf_details_list.append(vf_details) return [details.get("MAC") for details in vf_details_list] def get_vf_state(self, vf_index): """Get vf state {True/False} @param vf_index: vf index @todo: Handle "auto" state """ try: out = self._execute('', "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show([vf_index], out) if vf_lines: vf_details = self._parse_vf_link_show(vf_lines[0]) if vf_details: state = vf_details.get("link-state", self.LinkState.DISABLE) if state != self.LinkState.DISABLE: return True return False def set_vf_state(self, vf_index, state): """sets vf state. 
@param vf_index: vf index @param state: required state {True/False} """ status_str = self.LinkState.ENABLE if state else \ self.LinkState.DISABLE try: self._execute('', "link", ("set", self.dev_name, "vf", str(vf_index), "state", status_str)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) def _get_vf_link_show(self, vf_list, link_show_out): """Get link show output for VFs get vf link show command output filtered by given vf list @param vf_list: list of vf indexes @param link_show_out: link show command output @return: list of output rows regarding given vf_list """ vf_lines = [] for line in link_show_out.split("\n"): line = line.strip() if line.startswith("vf"): details = line.split() index = int(details[1]) if index in vf_list: vf_lines.append(line) if not vf_lines: LOG.warning(_LW("Cannot find vfs %(vfs)s in device %(dev_name)s"), {'vfs': vf_list, 'dev_name': self.dev_name}) return vf_lines def _parse_vf_link_show(self, vf_line): """Parses vf link show command output line. @param vf_line: link show vf line """ vf_details = {} pattern_match = self.VF_DETAILS_REG_EX.match(vf_line) if pattern_match: vf_details["vf"] = int(pattern_match.group("vf_index")) vf_details["MAC"] = pattern_match.group("mac") vf_details["link-state"] = pattern_match.group("state") else: LOG.warning(_LW("failed to parse vf link show line %(line)s: " "for %(device)s"), {'line': vf_line, 'device': self.dev_name}) return vf_details
apache-2.0
podemos-info/odoo
addons/delivery/partner.py
10
1466
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from osv import fields, osv class res_partner(osv.osv): _inherit = 'res.partner' _columns = { 'property_delivery_carrier': fields.property( 'delivery.carrier', type='many2one', relation='delivery.carrier', string="Delivery Method", view_load=True, help="This delivery method will be used when invoicing from picking."), } res_partner() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
magopian/django-runcommands
runcommands/tests.py
1
1393
# -*- coding: utf-8 -*- from __future__ import unicode_literals, absolute_import, division from django.core.exceptions import ImproperlyConfigured from django.utils.unittest import TestCase from .views import RunCommandView, run_command class RunCommandTest(TestCase): def test_run_command(self): """run_command runs the command and returns the output.""" output = run_command('echo Hello World') self.assertEqual(output.strip(), 'Hello World') class RunCommandViewTest(TestCase): def test_get_command(self): """RunCommand.get_command() returns the command set for this slug.""" view = RunCommandView(command='foo') command = view.get_command() self.assertEqual(command, 'foo') def test_get_command_not_set(self): """RunCommand.get_command() raises ImproperlyConfigured if no command has been set.""" view = RunCommandView() with self.assertRaises(ImproperlyConfigured): view.get_command() def test_get_context_data(self): """RunCommand.get_context_data() sets the command output.""" view = RunCommandView(command='foo') view.command_runner = lambda command: command # return argument view.get_command = lambda: 'foo output' context = view.get_context_data() self.assertEqual(context['command_output'], 'foo output')
bsd-3-clause
felipenaselva/felipe.repository
script.module.resolveurl/lib/resolveurl/plugins/vidics.py
1
2286
""" Kodi resolveurl plugin Copyright (C) 2017 script.module.resolveurl This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from lib import helpers from resolveurl import common from resolveurl.resolver import ResolveUrl, ResolverError class VidicsResolver(ResolveUrl): name = 'vidics' domains = ['vidics.tv'] pattern = '(?://|\.)(vidics\.tv)/embed/([0-9a-zA-Z]+)' def __init__(self): self.net = common.Net() def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) headers = {'User-Agent': common.RAND_UA} html = self.net.http_GET(web_url, headers=headers).content if html: quals = re.findall("""href=["'].+?id=["'](\d{3,4})p""", html) source = re.search("""mp4\d+\s*=\s*["']([^"']+)""", html) if source: headers.update({'Referer': web_url}) if len(quals) > 1: sources = [(qual, re.sub('-\d{3,4}\.', '-%s.' % qual, source.group(1))) for qual in quals] try: sources.sort(key=lambda x: int(re.sub("\D", "", x[0])), reverse=True) except: common.logger.log_debug('Scrape sources sort failed |int(re.sub("\D", "", x[0])|') return helpers.pick_source(sources) + helpers.append_headers(headers) else: return source.group(1) + helpers.append_headers(headers) raise ResolverError('Unable to locate video') def get_url(self, host, media_id): return self._default_get_url(host, media_id, template='https://embed1.{host}/embed/{media_id}/')
gpl-2.0
edx/edx-platform
openedx/core/lib/grade_utils.py
9
2509
""" Helpers functions for grades and scores. """ import math def compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False): """ Returns a tuple of: 1. Whether the 2nd set of scores is higher than the first. 2. Grade percentage of 1st set of scores. 3. Grade percentage of 2nd set of scores. If ``treat_undefined_as_zero`` is True, this function will treat cases where ``possible1`` or ``possible2`` is 0 as if the (earned / possible) score is 0. If this flag is false, a ZeroDivisionError is raised. """ try: percentage1 = float(earned1) / float(possible1) except ZeroDivisionError: if not treat_undefined_as_zero: raise percentage1 = 0.0 try: percentage2 = float(earned2) / float(possible2) except ZeroDivisionError: if not treat_undefined_as_zero: raise percentage2 = 0.0 is_higher = percentage2 >= percentage1 return is_higher, percentage1, percentage2 def is_score_higher_or_equal(earned1, possible1, earned2, possible2, treat_undefined_as_zero=False): """ Returns whether the 2nd set of scores is higher than the first. If ``treat_undefined_as_zero`` is True, this function will treat cases where ``possible1`` or ``possible2`` is 0 as if the (earned / possible) score is 0. If this flag is false, a ZeroDivisionError is raised. """ is_higher_or_equal, _, _ = compare_scores(earned1, possible1, earned2, possible2, treat_undefined_as_zero) return is_higher_or_equal def round_away_from_zero(number, digits=0): """ Round numbers using the 'away from zero' strategy as opposed to the 'Banker's rounding strategy.' The strategy refers to how we round when a number is half way between two numbers. eg. 0.5, 1.5, etc. In python 2 positive numbers in this category would be rounded up and negative numbers would be rounded down. ie. away from zero. In python 3 numbers round towards even. So 0.5 would round to 0 but 1.5 would round to 2. 
See here for more on floating point rounding strategies: https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules We want to continue to round away from zero so that student grades remain consistent and don't suddenly change. """ p = 10.0 ** digits if number >= 0: return float(math.floor((number * p) + 0.5)) / p else: return float(math.ceil((number * p) - 0.5)) / p
agpl-3.0
rhlobo/bigtempo
bigtempo/processors/dataframe_task.py
1
3801
# -*- coding: utf-8 -*- import pandas import datetime import bigtempo.utils as utils _DEFAULT_FREQUENCY = 'B' _KNOWN_FREQUENCIES = ['U', 'L', 'S', 'T', 'H', 'B', 'W', 'BMS', 'BM', 'MS', 'M', 'BQS', 'BQ', 'QS', 'Q', 'BAS', 'BA', 'AS', 'A'] _FREQUENCY_ENUMERATION_DICT = dict((y, x) for x, y in enumerate(_KNOWN_FREQUENCIES)) def factory(instance, registration, dependency_dict, *args, **kwargs): return DataFrameDatasourceTask(instance, registration, dependency_dict) class DataFrameDatasourceTask(object): def __init__(self, instance, registration, dependency_dict): self._instance = instance self._dependency_dict = dependency_dict self._registration = registration def process(self, symbol, start=None, end=None): context = self._create_context_for(symbol, start, end) result = self._instance.evaluate(context, symbol, start, end) return utils.slice(result, start, end) def _create_context_for(self, symbol, start=None, end=None): evaluated_dependencies = self._evaluate_datasource_dependencies(symbol, start, end) return DatasourceContext(evaluated_dependencies) def _evaluate_datasource_dependencies(self, symbol, start=None, end=None): result = {} new_start = None if not start else evaluate_loopback_period(self._registration, self._dependency_dict.values(), start) for reference, dependency in self._dependency_dict.iteritems(): result[reference] = dependency.process(symbol, new_start, end) return result class DatasourceContext(object): def __init__(self, dependencies): self._dependencies = dependencies def dependencies(self, reference=None): return self._dependencies.get(reference) if reference else self._dependencies def evaluate_loopback_period(datasource_registration, dependencies, date): lookback = datasource_registration['lookback'] frequency = determine_frequency(datasource_registration.get('frequency'), dependencies) # Holiday workaround if frequency in ['B', 'C']: lookback = 1 + int(lookback * 1.08) lookback += 1 return relative_period(-lookback, frequency, date) def 
determine_frequency(datasource_frequency=None, dependencies=None): if datasource_frequency is not None: return datasource_frequency if dependencies is None or len(dependencies) is 0: return _DEFAULT_FREQUENCY dependencies_frequencies = [] for dependency in dependencies: dependency_frequency = dependency._registration.get('frequency') dependency_dependencies = dependency._dependency_dict.values() dependencies_frequencies.append(determine_frequency(dependency_frequency, dependency_dependencies)) return max(dependencies_frequencies, key=_frequency_sort_key) def _frequency_sort_key(value): frequency = value.split('-')[0] if frequency not in _FREQUENCY_ENUMERATION_DICT: return 0 return _FREQUENCY_ENUMERATION_DICT[frequency] def relative_period(periods, frequency, date=None): business_day = equivalent_business_day() if not date else equivalent_business_day(date) return (pandas.Period(business_day, freq=frequency) + periods).to_timestamp() def equivalent_business_day(date=None): if not date: date = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) isoweekday = date.isoweekday() return date if isoweekday <= 5 else date - datetime.timedelta(isoweekday - 5)
mit
bairuiworld/cuda-convnet2
python_util/data.py
180
7803
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as n from numpy.random import randn, rand, random_integers import os from threading import Thread from util import * BATCH_META_FILE = "batches.meta" class DataLoaderThread(Thread): def __init__(self, path, tgt): Thread.__init__(self) self.path = path self.tgt = tgt def run(self): self.tgt += [unpickle(self.path)] class DataProvider: BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$') def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False): if batch_range == None: batch_range = DataProvider.get_batch_nums(data_dir) if init_batchnum is None or init_batchnum not in batch_range: init_batchnum = batch_range[0] self.data_dir = data_dir self.batch_range = batch_range self.curr_epoch = init_epoch self.curr_batchnum = init_batchnum self.dp_params = dp_params self.batch_meta = self.get_batch_meta(data_dir) self.data_dic = None self.test = test self.batch_idx = batch_range.index(init_batchnum) def get_next_batch(self): if self.data_dic is None or len(self.batch_range) > 1: self.data_dic = self.get_batch(self.curr_batchnum) epoch, batchnum = self.curr_epoch, self.curr_batchnum self.advance_batch() return epoch, batchnum, self.data_dic def get_batch(self, batch_num): fname = self.get_data_file_name(batch_num) if os.path.isdir(fname): # batch in sub-batches sub_batches = sorted(os.listdir(fname), key=alphanum_key) #print 
sub_batches num_sub_batches = len(sub_batches) tgts = [[] for i in xrange(num_sub_batches)] threads = [DataLoaderThread(os.path.join(fname, s), tgt) for (s, tgt) in zip(sub_batches, tgts)] for thread in threads: thread.start() for thread in threads: thread.join() return [t[0] for t in tgts] return unpickle(self.get_data_file_name(batch_num)) def get_data_dims(self,idx=0): return self.batch_meta['num_vis'] if idx == 0 else 1 def advance_batch(self): self.batch_idx = self.get_next_batch_idx() self.curr_batchnum = self.batch_range[self.batch_idx] if self.batch_idx == 0: # we wrapped self.curr_epoch += 1 def get_next_batch_idx(self): return (self.batch_idx + 1) % len(self.batch_range) def get_next_batch_num(self): return self.batch_range[self.get_next_batch_idx()] # get filename of current batch def get_data_file_name(self, batchnum=None): if batchnum is None: batchnum = self.curr_batchnum return os.path.join(self.data_dir, 'data_batch_%d' % batchnum) @classmethod def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False): # why the fuck can't i reference DataProvider in the original definition? 
#cls.dp_classes['default'] = DataProvider type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider if type.startswith("dummy-"): name = "-".join(type.split('-')[:-1]) + "-n" if name not in dp_types: raise DataProviderException("No such data provider: %s" % type) _class = dp_classes[name] dims = int(type.split('-')[-1]) return _class(dims) elif type in dp_types: _class = dp_classes[type] return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test) raise DataProviderException("No such data provider: %s" % type) @classmethod def register_data_provider(cls, name, desc, _class): if name in dp_types: raise DataProviderException("Data provider %s already registered" % name) dp_types[name] = desc dp_classes[name] = _class @staticmethod def get_batch_meta(data_dir): return unpickle(os.path.join(data_dir, BATCH_META_FILE)) @staticmethod def get_batch_filenames(srcdir): return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key) @staticmethod def get_batch_nums(srcdir): names = DataProvider.get_batch_filenames(srcdir) return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names))) @staticmethod def get_num_batches(srcdir): return len(DataProvider.get_batch_nums(srcdir)) class DummyDataProvider(DataProvider): def __init__(self, data_dim): #self.data_dim = data_dim self.batch_range = [1] self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True} self.curr_epoch = 1 self.curr_batchnum = 1 self.batch_idx = 0 def get_next_batch(self): epoch, batchnum = self.curr_epoch, self.curr_batchnum self.advance_batch() data = rand(512, self.get_data_dims()).astype(n.single) return self.curr_epoch, self.curr_batchnum, {'data':data} class LabeledDataProvider(DataProvider): def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False): DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, 
dp_params, test) def get_num_classes(self): return len(self.batch_meta['label_names']) class LabeledDummyDataProvider(DummyDataProvider): def __init__(self, data_dim, num_classes=10, num_cases=7): #self.data_dim = data_dim self.batch_range = [1] self.batch_meta = {'num_vis': data_dim, 'label_names': [str(x) for x in range(num_classes)], 'data_in_rows':True} self.num_cases = num_cases self.num_classes = num_classes self.curr_epoch = 1 self.curr_batchnum = 1 self.batch_idx=0 self.data = None def get_num_classes(self): return self.num_classes def get_next_batch(self): epoch, batchnum = self.curr_epoch, self.curr_batchnum self.advance_batch() if self.data is None: data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single) self.data, self.labels = data, labels else: data, labels = self.data, self.labels # print data.shape, labels.shape return self.curr_epoch, self.curr_batchnum, [data.T, labels.T ] dp_types = {"dummy-n": "Dummy data provider for n-dimensional data", "dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"} dp_classes = {"dummy-n": DummyDataProvider, "dummy-labeled-n": LabeledDummyDataProvider} class DataProviderException(Exception): pass
apache-2.0
Workday/OpenFrame
build/android/pylib/remote/device/remote_device_uirobot_test_run.py
26
2952
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Run specific test on specific environment.""" import logging from pylib.base import base_test_result from pylib.remote.device import appurify_sanitized from pylib.remote.device import remote_device_test_run from pylib.remote.device import remote_device_helper class RemoteDeviceUirobotTestRun(remote_device_test_run.RemoteDeviceTestRun): """Run uirobot tests on a remote device.""" def __init__(self, env, test_instance): """Constructor. Args: env: Environment the tests will run in. test_instance: The test that will be run. """ super(RemoteDeviceUirobotTestRun, self).__init__(env, test_instance) #override def TestPackage(self): return self._test_instance.package_name #override def _TriggerSetUp(self): """Set up the triggering of a test run.""" logging.info('Triggering test run.') if self._env.device_type == 'Android': default_runner_type = 'android_robot' elif self._env.device_type == 'iOS': default_runner_type = 'ios_robot' else: raise remote_device_helper.RemoteDeviceError( 'Unknown device type: %s' % self._env.device_type) self._app_id = self._UploadAppToDevice(self._test_instance.app_under_test) if not self._env.runner_type: runner_type = default_runner_type logging.info('Using default runner type: %s', default_runner_type) else: runner_type = self._env.runner_type self._test_id = self._UploadTestToDevice( 'android_robot', None, app_id=self._app_id) config_body = {'duration': self._test_instance.minutes} self._SetTestConfig(runner_type, config_body) # TODO(rnephew): Switch to base class implementation when supported. 
#override def _UploadTestToDevice(self, test_type, test_path, app_id=None): if test_path: logging.info("Ignoring test path.") data = { 'access_token':self._env.token, 'test_type':test_type, 'app_id':app_id, } with appurify_sanitized.SanitizeLogging(self._env.verbose_count, logging.WARNING): test_upload_res = appurify_sanitized.utils.post('tests/upload', data, None) remote_device_helper.TestHttpResponse( test_upload_res, 'Unable to get UiRobot test id.') return test_upload_res.json()['response']['test_id'] #override def _ParseTestResults(self): logging.info('Parsing results from remote service.') results = base_test_result.TestRunResults() if self._results['results']['pass']: result_type = base_test_result.ResultType.PASS else: result_type = base_test_result.ResultType.FAIL results.AddResult(base_test_result.BaseTestResult('uirobot', result_type)) return results
bsd-3-clause
openhatch/oh-missions-oppia-beta
core/domain/fs_domain.py
2
10316
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects representing a file system and a file stream.""" __author__ = 'Sean Lip' import logging import os from core.platform import models (file_models,) = models.Registry.import_models([ models.NAMES.file ]) import feconf import utils CHANGE_LIST_SAVE = [{'cmd': 'save'}] class FileMetadata(object): """A class representing the metadata of a file.""" def __init__(self, metadata): self._size = metadata.size if (metadata is not None) else None @property def size(self): return self._size class FileStreamWithMetadata(object): """A class that wraps a file stream, but adds extra attributes to it.""" def __init__(self, content, version, metadata): """The args are a file content blob and a metadata model object.""" self._content = content self._version = version self._metadata = FileMetadata(metadata) def read(self): """Emulates stream.read(). Returns all bytes and emulates EOF.""" content = self._content self._content = '' return content @property def metadata(self): return self._metadata @property def version(self): return self._version class ExplorationFileSystem(object): """A datastore-backed read-write file system for a single exploration. The conceptual intention is for each exploration to have its own asset folder. An asset has no meaning outside its exploration, so the assets in these asset folders should therefore not be edited directly. 
They should only be modified as side-effects of some other operation (such as adding an image to an exploration). The content of an exploration should include a reference to the asset together with the version number of the asset. This allows the exploration to refer to asset versions. In general, assets should be retrieved only within the context of the exploration that contains them, and should not be retrieved outside this context. """ _DEFAULT_VERSION_NUMBER = 1 def __init__(self, exploration_id): self._exploration_id = exploration_id @property def exploration_id(self): return self._exploration_id def _get_file_metadata(self, filepath, version): """Return the desired file metadata. Returns None if the file does not exist. """ if version is None: return file_models.FileMetadataModel.get_model( self._exploration_id, 'assets/%s' % filepath) else: return file_models.FileMetadataModel.get_version( self._exploration_id, 'assets/%s' % filepath, version) def _get_file_data(self, filepath, version): """Return the desired file content. Returns None if the file does not exist. 
""" if version is None: return file_models.FileModel.get_model( self._exploration_id, 'assets/%s' % filepath) else: return file_models.FileModel.get_version( self._exploration_id, 'assets/%s' % filepath, version) def _save_file(self, user_id, filepath, raw_bytes): """Create or update a file.""" if len(raw_bytes) > feconf.MAX_FILE_SIZE_BYTES: raise Exception('The maximum allowed file size is 1 MB.') metadata = self._get_file_metadata(filepath, None) if not metadata: metadata = file_models.FileMetadataModel.create( self._exploration_id, 'assets/%s' % filepath) metadata.size = len(raw_bytes) data = self._get_file_data(filepath, None) if not data: data = file_models.FileModel.create( self._exploration_id, 'assets/%s' % filepath) data.content = raw_bytes data.commit(user_id, CHANGE_LIST_SAVE) metadata.commit(user_id, CHANGE_LIST_SAVE) def get(self, filepath, version=None): """Gets a file as an unencoded stream of raw bytes. If `version` is not supplied, the latest version is retrieved. If the file does not exist, None is returned. """ metadata = self._get_file_metadata(filepath, version) if metadata: data = self._get_file_data(filepath, version) if data: if version is None: version = data.version return FileStreamWithMetadata(data.content, version, metadata) else: logging.error( 'Metadata and data for file %s (version %s) are out of ' 'sync.' 
% (filepath, version)) return None else: return None def commit(self, user_id, filepath, raw_bytes): """Saves a raw bytestring as a file in the database.""" self._save_file(user_id, filepath, raw_bytes) def delete(self, user_id, filepath): """Marks the current version of a file as deleted.""" metadata = self._get_file_metadata(filepath, None) if metadata: metadata.delete(user_id, '') data = self._get_file_data(filepath, None) if data: data.delete(user_id, '') def isfile(self, filepath): """Checks the existence of a file.""" metadata = self._get_file_metadata(filepath, None) return bool(metadata) def listdir(self, dir_name): """Lists all files in a directory. Args: dir_name: The directory whose files should be listed. This should not start with '/' or end with '/'. Returns: List of str. This is a lexicographically-sorted list of filenames, each of which is prefixed with dir_name. """ # The trailing slash is necessary to prevent non-identical directory # names with the same prefix from matching, e.g. /abcd/123.png should # not match a query for files under /abc/. prefix = '%s' % os.path.join( '/', self._exploration_id, 'assets', dir_name) if not prefix.endswith('/'): prefix += '/' result = set() metadata_models = file_models.FileMetadataModel.get_undeleted() for metadata_model in metadata_models: filepath = metadata_model.id if filepath.startswith(prefix): result.add('/'.join(filepath.split('/')[3:])) return sorted(list(result)) class DiskBackedFileSystem(object): """Implementation for a disk-backed file system. This implementation ignores versioning and is used only by tests. """ def __init__(self, root): """Constructor for this class. Args: root: the path to append to the oppia/ directory. 
""" self._root = os.path.join(os.getcwd(), root) self._exploration_id = 'test' @property def exploration_id(self): return self._exploration_id def isfile(self, filepath): """Checks if a file exists.""" return os.path.isfile(os.path.join(self._root, filepath)) def get(self, filepath, version=None): """Returns a bytestring with the file content, but no metadata.""" content = utils.get_file_contents( os.path.join(self._root, filepath), raw_bytes=True) return FileStreamWithMetadata(content, None, None) def commit(self, user_id, filepath, raw_bytes): raise NotImplementedError def delete(self, user_id, filepath): raise NotImplementedError def listdir(self, dir_name): raise NotImplementedError class AbstractFileSystem(object): """Interface for a file system.""" def __init__(self, impl): self._impl = impl @property def impl(self): return self._impl def _check_filepath(self, filepath): """Raises an error if a filepath is invalid.""" base_dir = os.path.join('/', self.impl.exploration_id, 'assets') absolute_path = os.path.join(base_dir, filepath) normalized_path = os.path.normpath(absolute_path) # This check prevents directory traversal. if not normalized_path.startswith(base_dir): raise IOError('Invalid filepath: %s' % filepath) def isfile(self, filepath): """Checks if a file exists. Similar to os.path.isfile(...).""" self._check_filepath(filepath) return self._impl.isfile(filepath) def open(self, filepath, version=None): """Returns a stream with the file content. Similar to open(...).""" self._check_filepath(filepath) return self._impl.get(filepath, version=version) def get(self, filepath, version=None): """Returns a bytestring with the file content, but no metadata.""" file_stream = self.open(filepath, version=version) if file_stream is None: raise IOError( 'File %s (version %s) not found.' 
% (filepath, version if version else 'latest')) return file_stream.read() def commit(self, user_id, filepath, raw_bytes): """Replaces the contents of the file with the given bytestring.""" raw_bytes = str(raw_bytes) self._check_filepath(filepath) self._impl.commit(user_id, filepath, raw_bytes) def delete(self, user_id, filepath): """Deletes a file and the metadata associated with it.""" self._check_filepath(filepath) self._impl.delete(user_id, filepath) def listdir(self, dir_name): """Lists all the files in a directory. Similar to os.listdir(...).""" self._check_filepath(dir_name) return self._impl.listdir(dir_name)
apache-2.0
morelab/weblabdeusto
server/src/weblab/db/upgrade/scheduling/env.py
3
2051
from __future__ import print_function, unicode_literals from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure(url=url) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
bsd-2-clause
Split-Screen/android_kernel_motorola_otus
tools/perf/python/twatch.py
7370
1334
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
gpl-2.0
WillGuan105/django
tests/template_tests/syntax_tests/test_i18n.py
245
23356
# coding: utf-8 from __future__ import unicode_literals from django.template import TemplateSyntaxError from django.test import SimpleTestCase from django.utils import translation from django.utils.safestring import mark_safe from ..utils import setup class I18nTagTests(SimpleTestCase): libraries = { 'custom': 'template_tests.templatetags.custom', 'i18n': 'django.templatetags.i18n', } @setup({'i18n01': '{% load i18n %}{% trans \'xxxyyyxxx\' %}'}) def test_i18n01(self): """ simple translation of a string delimited by ' """ output = self.engine.render_to_string('i18n01') self.assertEqual(output, 'xxxyyyxxx') @setup({'i18n02': '{% load i18n %}{% trans "xxxyyyxxx" %}'}) def test_i18n02(self): """ simple translation of a string delimited by " """ output = self.engine.render_to_string('i18n02') self.assertEqual(output, 'xxxyyyxxx') @setup({'i18n03': '{% load i18n %}{% blocktrans %}{{ anton }}{% endblocktrans %}'}) def test_i18n03(self): """ simple translation of a variable """ output = self.engine.render_to_string('i18n03', {'anton': b'\xc3\x85'}) self.assertEqual(output, 'Å') @setup({'i18n04': '{% load i18n %}{% blocktrans with berta=anton|lower %}{{ berta }}{% endblocktrans %}'}) def test_i18n04(self): """ simple translation of a variable and filter """ output = self.engine.render_to_string('i18n04', {'anton': b'\xc3\x85'}) self.assertEqual(output, 'å') @setup({'legacyi18n04': '{% load i18n %}' '{% blocktrans with anton|lower as berta %}{{ berta }}{% endblocktrans %}'}) def test_legacyi18n04(self): """ simple translation of a variable and filter """ output = self.engine.render_to_string('legacyi18n04', {'anton': b'\xc3\x85'}) self.assertEqual(output, 'å') @setup({'i18n05': '{% load i18n %}{% blocktrans %}xxx{{ anton }}xxx{% endblocktrans %}'}) def test_i18n05(self): """ simple translation of a string with interpolation """ output = self.engine.render_to_string('i18n05', {'anton': 'yyy'}) self.assertEqual(output, 'xxxyyyxxx') @setup({'i18n06': '{% load i18n %}{% trans 
"Page not found" %}'}) def test_i18n06(self): """ simple translation of a string to german """ with translation.override('de'): output = self.engine.render_to_string('i18n06') self.assertEqual(output, 'Seite nicht gefunden') @setup({'i18n07': '{% load i18n %}' '{% blocktrans count counter=number %}singular{% plural %}' '{{ counter }} plural{% endblocktrans %}'}) def test_i18n07(self): """ translation of singular form """ output = self.engine.render_to_string('i18n07', {'number': 1}) self.assertEqual(output, 'singular') @setup({'legacyi18n07': '{% load i18n %}' '{% blocktrans count number as counter %}singular{% plural %}' '{{ counter }} plural{% endblocktrans %}'}) def test_legacyi18n07(self): """ translation of singular form """ output = self.engine.render_to_string('legacyi18n07', {'number': 1}) self.assertEqual(output, 'singular') @setup({'i18n08': '{% load i18n %}' '{% blocktrans count number as counter %}singular{% plural %}' '{{ counter }} plural{% endblocktrans %}'}) def test_i18n08(self): """ translation of plural form """ output = self.engine.render_to_string('i18n08', {'number': 2}) self.assertEqual(output, '2 plural') @setup({'legacyi18n08': '{% load i18n %}' '{% blocktrans count counter=number %}singular{% plural %}' '{{ counter }} plural{% endblocktrans %}'}) def test_legacyi18n08(self): """ translation of plural form """ output = self.engine.render_to_string('legacyi18n08', {'number': 2}) self.assertEqual(output, '2 plural') @setup({'i18n09': '{% load i18n %}{% trans "Page not found" noop %}'}) def test_i18n09(self): """ simple non-translation (only marking) of a string to german """ with translation.override('de'): output = self.engine.render_to_string('i18n09') self.assertEqual(output, 'Page not found') @setup({'i18n10': '{{ bool|yesno:_("yes,no,maybe") }}'}) def test_i18n10(self): """ translation of a variable with a translated filter """ with translation.override('de'): output = self.engine.render_to_string('i18n10', {'bool': True}) 
self.assertEqual(output, 'Ja') @setup({'i18n11': '{{ bool|yesno:"ja,nein" }}'}) def test_i18n11(self): """ translation of a variable with a non-translated filter """ output = self.engine.render_to_string('i18n11', {'bool': True}) self.assertEqual(output, 'ja') @setup({'i18n12': '{% load i18n %}' '{% get_available_languages as langs %}{% for lang in langs %}' '{% if lang.0 == "de" %}{{ lang.0 }}{% endif %}{% endfor %}'}) def test_i18n12(self): """ usage of the get_available_languages tag """ output = self.engine.render_to_string('i18n12') self.assertEqual(output, 'de') @setup({'i18n13': '{{ _("Password") }}'}) def test_i18n13(self): """ translation of constant strings """ with translation.override('de'): output = self.engine.render_to_string('i18n13') self.assertEqual(output, 'Passwort') @setup({'i18n14': '{% cycle "foo" _("Password") _(\'Password\') as c %} {% cycle c %} {% cycle c %}'}) def test_i18n14(self): """ translation of constant strings """ with translation.override('de'): output = self.engine.render_to_string('i18n14') self.assertEqual(output, 'foo Passwort Passwort') @setup({'i18n15': '{{ absent|default:_("Password") }}'}) def test_i18n15(self): """ translation of constant strings """ with translation.override('de'): output = self.engine.render_to_string('i18n15', {'absent': ''}) self.assertEqual(output, 'Passwort') @setup({'i18n16': '{{ _("<") }}'}) def test_i18n16(self): """ translation of constant strings """ with translation.override('de'): output = self.engine.render_to_string('i18n16') self.assertEqual(output, '<') @setup({'i18n17': '{% load i18n %}' '{% blocktrans with berta=anton|escape %}{{ berta }}{% endblocktrans %}'}) def test_i18n17(self): """ Escaping inside blocktrans and trans works as if it was directly in the template. 
""" output = self.engine.render_to_string('i18n17', {'anton': 'α & β'}) self.assertEqual(output, 'α &amp; β') @setup({'i18n18': '{% load i18n %}' '{% blocktrans with berta=anton|force_escape %}{{ berta }}{% endblocktrans %}'}) def test_i18n18(self): output = self.engine.render_to_string('i18n18', {'anton': 'α & β'}) self.assertEqual(output, 'α &amp; β') @setup({'i18n19': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'}) def test_i18n19(self): output = self.engine.render_to_string('i18n19', {'andrew': 'a & b'}) self.assertEqual(output, 'a &amp; b') @setup({'i18n20': '{% load i18n %}{% trans andrew %}'}) def test_i18n20(self): output = self.engine.render_to_string('i18n20', {'andrew': 'a & b'}) self.assertEqual(output, 'a &amp; b') @setup({'i18n21': '{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}'}) def test_i18n21(self): output = self.engine.render_to_string('i18n21', {'andrew': mark_safe('a & b')}) self.assertEqual(output, 'a & b') @setup({'i18n22': '{% load i18n %}{% trans andrew %}'}) def test_i18n22(self): output = self.engine.render_to_string('i18n22', {'andrew': mark_safe('a & b')}) self.assertEqual(output, 'a & b') @setup({'legacyi18n17': '{% load i18n %}' '{% blocktrans with anton|escape as berta %}{{ berta }}{% endblocktrans %}'}) def test_legacyi18n17(self): output = self.engine.render_to_string('legacyi18n17', {'anton': 'α & β'}) self.assertEqual(output, 'α &amp; β') @setup({'legacyi18n18': '{% load i18n %}' '{% blocktrans with anton|force_escape as berta %}' '{{ berta }}{% endblocktrans %}'}) def test_legacyi18n18(self): output = self.engine.render_to_string('legacyi18n18', {'anton': 'α & β'}) self.assertEqual(output, 'α &amp; β') @setup({'i18n23': '{% load i18n %}{% trans "Page not found"|capfirst|slice:"6:" %}'}) def test_i18n23(self): """ #5972 - Use filters with the {% trans %} tag """ with translation.override('de'): output = self.engine.render_to_string('i18n23') self.assertEqual(output, 'nicht gefunden') 
@setup({'i18n24': '{% load i18n %}{% trans \'Page not found\'|upper %}'}) def test_i18n24(self): with translation.override('de'): output = self.engine.render_to_string('i18n24') self.assertEqual(output, 'SEITE NICHT GEFUNDEN') @setup({'i18n25': '{% load i18n %}{% trans somevar|upper %}'}) def test_i18n25(self): with translation.override('de'): output = self.engine.render_to_string('i18n25', {'somevar': 'Page not found'}) self.assertEqual(output, 'SEITE NICHT GEFUNDEN') @setup({'i18n26': '{% load i18n %}' '{% blocktrans with extra_field=myextra_field count counter=number %}' 'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'}) def test_i18n26(self): """ translation of plural form with extra field in singular form (#13568) """ output = self.engine.render_to_string('i18n26', {'myextra_field': 'test', 'number': 1}) self.assertEqual(output, 'singular test') @setup({'legacyi18n26': '{% load i18n %}' '{% blocktrans with myextra_field as extra_field count number as counter %}' 'singular {{ extra_field }}{% plural %}plural{% endblocktrans %}'}) def test_legacyi18n26(self): output = self.engine.render_to_string('legacyi18n26', {'myextra_field': 'test', 'number': 1}) self.assertEqual(output, 'singular test') @setup({'i18n27': '{% load i18n %}{% blocktrans count counter=number %}' '{{ counter }} result{% plural %}{{ counter }} results' '{% endblocktrans %}'}) def test_i18n27(self): """ translation of singular form in russian (#14126) """ with translation.override('ru'): output = self.engine.render_to_string('i18n27', {'number': 1}) self.assertEqual(output, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442') @setup({'legacyi18n27': '{% load i18n %}' '{% blocktrans count number as counter %}{{ counter }} result' '{% plural %}{{ counter }} results{% endblocktrans %}'}) def test_legacyi18n27(self): with translation.override('ru'): output = self.engine.render_to_string('legacyi18n27', {'number': 1}) self.assertEqual(output, '1 
\u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442') @setup({'i18n28': '{% load i18n %}' '{% blocktrans with a=anton b=berta %}{{ a }} + {{ b }}{% endblocktrans %}'}) def test_i18n28(self): """ simple translation of multiple variables """ output = self.engine.render_to_string('i18n28', {'anton': 'α', 'berta': 'β'}) self.assertEqual(output, 'α + β') @setup({'legacyi18n28': '{% load i18n %}' '{% blocktrans with anton as a and berta as b %}' '{{ a }} + {{ b }}{% endblocktrans %}'}) def test_legacyi18n28(self): output = self.engine.render_to_string('legacyi18n28', {'anton': 'α', 'berta': 'β'}) self.assertEqual(output, 'α + β') # retrieving language information @setup({'i18n28_2': '{% load i18n %}' '{% get_language_info for "de" as l %}' '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'}) def test_i18n28_2(self): output = self.engine.render_to_string('i18n28_2') self.assertEqual(output, 'de: German/Deutsch bidi=False') @setup({'i18n29': '{% load i18n %}' '{% get_language_info for LANGUAGE_CODE as l %}' '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'}) def test_i18n29(self): output = self.engine.render_to_string('i18n29', {'LANGUAGE_CODE': 'fi'}) self.assertEqual(output, 'fi: Finnish/suomi bidi=False') @setup({'i18n30': '{% load i18n %}' '{% get_language_info_list for langcodes as langs %}' '{% for l in langs %}{{ l.code }}: {{ l.name }}/' '{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'}) def test_i18n30(self): output = self.engine.render_to_string('i18n30', {'langcodes': ['it', 'no']}) self.assertEqual(output, 'it: Italian/italiano bidi=False; no: Norwegian/norsk bidi=False; ') @setup({'i18n31': '{% load i18n %}' '{% get_language_info_list for langcodes as langs %}' '{% for l in langs %}{{ l.code }}: {{ l.name }}/' '{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}'}) def test_i18n31(self): output = self.engine.render_to_string('i18n31', {'langcodes': (('sl', 'Slovenian'), ('fa', 'Persian'))}) self.assertEqual( output, 'sl: 
Slovenian/Sloven\u0161\u010dina bidi=False; ' 'fa: Persian/\u0641\u0627\u0631\u0633\u06cc bidi=True; ' ) @setup({'i18n32': '{% load i18n %}{{ "hu"|language_name }} ' '{{ "hu"|language_name_local }} {{ "hu"|language_bidi }} ' '{{ "hu"|language_name_translated }}'}) def test_i18n32(self): output = self.engine.render_to_string('i18n32') self.assertEqual(output, 'Hungarian Magyar False Hungarian') with translation.override('cs'): output = self.engine.render_to_string('i18n32') self.assertEqual(output, 'Hungarian Magyar False maďarsky') @setup({'i18n33': '{% load i18n %}' '{{ langcode|language_name }} {{ langcode|language_name_local }} ' '{{ langcode|language_bidi }} {{ langcode|language_name_translated }}'}) def test_i18n33(self): output = self.engine.render_to_string('i18n33', {'langcode': 'nl'}) self.assertEqual(output, 'Dutch Nederlands False Dutch') with translation.override('cs'): output = self.engine.render_to_string('i18n33', {'langcode': 'nl'}) self.assertEqual(output, 'Dutch Nederlands False nizozemsky') # blocktrans handling of variables which are not in the context. 
# this should work as if blocktrans was not there (#19915) @setup({'i18n34': '{% load i18n %}{% blocktrans %}{{ missing }}{% endblocktrans %}'}) def test_i18n34(self): output = self.engine.render_to_string('i18n34') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'i18n34_2': '{% load i18n %}{% blocktrans with a=\'α\' %}{{ missing }}{% endblocktrans %}'}) def test_i18n34_2(self): output = self.engine.render_to_string('i18n34_2') if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') @setup({'i18n34_3': '{% load i18n %}{% blocktrans with a=anton %}{{ missing }}{% endblocktrans %}'}) def test_i18n34_3(self): output = self.engine.render_to_string('i18n34_3', {'anton': '\xce\xb1'}) if self.engine.string_if_invalid: self.assertEqual(output, 'INVALID') else: self.assertEqual(output, '') # trans tag with as var @setup({'i18n35': '{% load i18n %}{% trans "Page not found" as page_not_found %}{{ page_not_found }}'}) def test_i18n35(self): with translation.override('de'): output = self.engine.render_to_string('i18n35') self.assertEqual(output, 'Seite nicht gefunden') @setup({'i18n36': '{% load i18n %}' '{% trans "Page not found" noop as page_not_found %}{{ page_not_found }}'}) def test_i18n36(self): with translation.override('de'): output = self.engine.render_to_string('i18n36') self.assertEqual(output, 'Page not found') @setup({'i18n37': '{% load i18n %}' '{% trans "Page not found" as page_not_found %}' '{% blocktrans %}Error: {{ page_not_found }}{% endblocktrans %}'}) def test_i18n37(self): with translation.override('de'): output = self.engine.render_to_string('i18n37') self.assertEqual(output, 'Error: Seite nicht gefunden') # Test whitespace in filter arguments @setup({'i18n38': '{% load i18n custom %}' '{% get_language_info for "de"|noop:"x y" as l %}' '{{ l.code }}: {{ l.name }}/{{ l.name_local }}/' '{{ l.name_translated }} bidi={{ l.bidi }}'}) def 
test_i18n38(self): with translation.override('cs'): output = self.engine.render_to_string('i18n38') self.assertEqual(output, 'de: German/Deutsch/německy bidi=False') @setup({'i18n38_2': '{% load i18n custom %}' '{% get_language_info_list for langcodes|noop:"x y" as langs %}' '{% for l in langs %}{{ l.code }}: {{ l.name }}/' '{{ l.name_local }}/{{ l.name_translated }} ' 'bidi={{ l.bidi }}; {% endfor %}'}) def test_i18n38_2(self): with translation.override('cs'): output = self.engine.render_to_string('i18n38_2', {'langcodes': ['it', 'fr']}) self.assertEqual( output, 'it: Italian/italiano/italsky bidi=False; ' 'fr: French/français/francouzsky bidi=False; ' ) # blocktrans tag with asvar @setup({'i18n39': '{% load i18n %}' '{% blocktrans asvar page_not_found %}Page not found{% endblocktrans %}' '>{{ page_not_found }}<'}) def test_i18n39(self): with translation.override('de'): output = self.engine.render_to_string('i18n39') self.assertEqual(output, '>Seite nicht gefunden<') @setup({'i18n40': '{% load i18n %}' '{% trans "Page not found" as pg_404 %}' '{% blocktrans with page_not_found=pg_404 asvar output %}' 'Error: {{ page_not_found }}' '{% endblocktrans %}'}) def test_i18n40(self): output = self.engine.render_to_string('i18n40') self.assertEqual(output, '') @setup({'i18n41': '{% load i18n %}' '{% trans "Page not found" as pg_404 %}' '{% blocktrans with page_not_found=pg_404 asvar output %}' 'Error: {{ page_not_found }}' '{% endblocktrans %}' '>{{ output }}<'}) def test_i18n41(self): with translation.override('de'): output = self.engine.render_to_string('i18n41') self.assertEqual(output, '>Error: Seite nicht gefunden<') @setup({'template': '{% load i18n %}{% trans %}A}'}) def test_syntax_error_no_arguments(self): msg = "'trans' takes at least one argument" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" badoption %}'}) def test_syntax_error_bad_option(self): msg = 
"Unknown argument for 'trans' tag: 'badoption'" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" as %}'}) def test_syntax_error_missing_assignment(self): msg = "No argument provided to the 'trans' tag for the as option." with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% blocktrans asvar %}Yes{% endblocktrans %}'}) def test_blocktrans_syntax_error_missing_assignment(self): msg = "No argument provided to the 'blocktrans' tag for the asvar option." with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" as var context %}'}) def test_syntax_error_missing_context(self): msg = "No argument provided to the 'trans' tag for the context option." with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" context as var %}'}) def test_syntax_error_context_as(self): msg = "Invalid argument 'as' provided to the 'trans' tag for the context option" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" context noop %}'}) def test_syntax_error_context_noop(self): msg = "Invalid argument 'noop' provided to the 'trans' tag for the context option" with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "Yes" noop noop %}'}) def test_syntax_error_duplicate_option(self): msg = "The 'noop' option was specified more than once." 
with self.assertRaisesMessage(TemplateSyntaxError, msg): self.engine.render_to_string('template') @setup({'template': '{% load i18n %}{% trans "%s" %}'}) def test_trans_tag_using_a_string_that_looks_like_str_fmt(self): output = self.engine.render_to_string('template') self.assertEqual(output, '%s') @setup({'template': '{% load i18n %}{% blocktrans %}%s{% endblocktrans %}'}) def test_blocktrans_tag_using_a_string_that_looks_like_str_fmt(self): output = self.engine.render_to_string('template') self.assertEqual(output, '%s')
bsd-3-clause
punchagan/zulip
zerver/webhooks/deskdotcom/view.py
6
1047
# Webhooks for external integrations.
from django.http import HttpRequest, HttpResponse

from zerver.decorator import authenticated_rest_api_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile

# Desk.com integrations work by having the user supply a template in which
# Desk.com substitutes values such as {{customer.name}}, then POSTs the
# rendered result as a single "data" parameter.  There is no raw JSON payload
# for us to parse, so the sensible approach is to author a complete Zulip
# message as the Desk.com template and have this webhook forward the rendered
# "data" text verbatim.


@authenticated_rest_api_view(webhook_client_name="Desk")
@has_request_variables
def api_deskdotcom_webhook(
    request: HttpRequest, user_profile: UserProfile, data: str = REQ()
) -> HttpResponse:
    """Relay a pre-rendered Desk.com notification into Zulip.

    The entire message body arrives already formatted in ``data`` (rendered
    by Desk.com from a user-authored template), so it is posted as-is under
    a fixed topic.
    """
    subject = "Desk.com notification"
    check_send_webhook_message(request, user_profile, subject, data)
    return json_success()
apache-2.0
Gchorba/Ask
lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py
3133
34872
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # EUCTW frequency table # Converted from big5 work # by Taiwan's Mandarin Promotion Council # <http:#www.edu.tw:81/mandr/> # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Idea Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 # Char to FreqOrder table , EUCTW_TABLE_SIZE = 8102 EUCTWCharToFreqOrder = ( 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 63,7312,7313, 317,1614, 75, 222, 
159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 
439, 38,7339,1063,7340, 794, # 3158 3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 
369,1274,2194,2175,1837,4338, # 3878 1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 7511,2026,4386,3534,7512, 501,7513,4123, 
594,3431,2165,1821,3535,3432,3536,3192, # 4598 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 2600, 161,1178,4156,1982, 987,4423,1101,4157, 
631,3943,1157,3198,2420,1343,1241, # 5318 1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 7662,3349,3041,3451, 
511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 
52,7767,3047,1796,7768,7769, # 6406 7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 2253, 574,3822,1603, 295,1535, 
705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 1173, 288,2311, 
454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 #Everything below is of no interest for detection purpose 2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118 2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134 8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150 8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166 8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182 
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198 8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214 8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230 8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246 8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262 8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278 8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294 8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310 8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326 8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342 8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358 8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374 8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390 8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406 8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422 8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438 8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454 8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470 8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486 8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502 8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518 8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534 
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550 8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566 8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582 8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598 8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614 8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630 8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646 8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662 8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678 8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694 8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710 8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726 8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742 # flake8: noqa
mit
bmatheny/gtest
test/gtest_help_test.py
2968
5856
#!/usr/bin/env python # # Copyright 2009, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests the --help flag of Google C++ Testing Framework. SYNOPSIS gtest_help_test.py --build_dir=BUILD/DIR # where BUILD/DIR contains the built gtest_help_test_ file. 
gtest_help_test.py """ __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import gtest_test_utils IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' IS_WINDOWS = os.name == 'nt' PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') FLAG_PREFIX = '--gtest_' DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style' STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to' UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG), re.sub('^--', '/', LIST_TESTS_FLAG), re.sub('_', '-', LIST_TESTS_FLAG)] INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing' SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess( [PROGRAM_PATH, LIST_TESTS_FLAG]).output # The help message must match this regex. HELP_REGEX = re.compile( FLAG_PREFIX + r'list_tests.*' + FLAG_PREFIX + r'filter=.*' + FLAG_PREFIX + r'also_run_disabled_tests.*' + FLAG_PREFIX + r'repeat=.*' + FLAG_PREFIX + r'shuffle.*' + FLAG_PREFIX + r'random_seed=.*' + FLAG_PREFIX + r'color=.*' + FLAG_PREFIX + r'print_time.*' + FLAG_PREFIX + r'output=.*' + FLAG_PREFIX + r'break_on_failure.*' + FLAG_PREFIX + r'throw_on_failure.*' + FLAG_PREFIX + r'catch_exceptions=0.*', re.DOTALL) def RunWithFlag(flag): """Runs gtest_help_test_ with the given flag. Returns: the exit code and the text output as a tuple. Args: flag: the command-line flag to pass to gtest_help_test_, or None. """ if flag is None: command = [PROGRAM_PATH] else: command = [PROGRAM_PATH, flag] child = gtest_test_utils.Subprocess(command) return child.exit_code, child.output class GTestHelpTest(gtest_test_utils.TestCase): """Tests the --help flag and its equivalent forms.""" def TestHelpFlag(self, flag): """Verifies correct behavior when help flag is specified. The right message must be printed and the tests must skipped when the given flag is specified. 
Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assertEquals(0, exit_code) self.assert_(HELP_REGEX.search(output), output) if IS_LINUX: self.assert_(STREAM_RESULT_TO_FLAG in output, output) else: self.assert_(STREAM_RESULT_TO_FLAG not in output, output) if SUPPORTS_DEATH_TESTS and not IS_WINDOWS: self.assert_(DEATH_TEST_STYLE_FLAG in output, output) else: self.assert_(DEATH_TEST_STYLE_FLAG not in output, output) def TestNonHelpFlag(self, flag): """Verifies correct behavior when no help flag is specified. Verifies that when no help flag is specified, the tests are run and the help message is not printed. Args: flag: A flag to pass to the binary or None. """ exit_code, output = RunWithFlag(flag) self.assert_(exit_code != 0) self.assert_(not HELP_REGEX.search(output), output) def testPrintsHelpWithFullFlag(self): self.TestHelpFlag('--help') def testPrintsHelpWithShortFlag(self): self.TestHelpFlag('-h') def testPrintsHelpWithQuestionFlag(self): self.TestHelpFlag('-?') def testPrintsHelpWithWindowsStyleQuestionFlag(self): self.TestHelpFlag('/?') def testPrintsHelpWithUnrecognizedGoogleTestFlag(self): self.TestHelpFlag(UNKNOWN_FLAG) def testPrintsHelpWithIncorrectFlagStyle(self): for incorrect_flag in INCORRECT_FLAG_VARIANTS: self.TestHelpFlag(incorrect_flag) def testRunsTestsWithoutHelpFlag(self): """Verifies that when no help flag is specified, the tests are run and the help message is not printed.""" self.TestNonHelpFlag(None) def testRunsTestsWithGtestInternalFlag(self): """Verifies that the tests are run and no help message is printed when a flag starting with Google Test prefix and 'internal_' is supplied.""" self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING) if __name__ == '__main__': gtest_test_utils.Main()
bsd-3-clause
benoitsteiner/tensorflow-xsmm
tensorflow/contrib/learn/python/learn/ops/losses_ops.py
39
3358
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow Ops for loss computation (deprecated). This module and all its submodules are deprecated. See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md) for migration instructions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.framework import deprecated from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops as array_ops_ from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops.losses import losses @deprecated('2016-12-01', 'Use `tf.losses.mean_squared_error` ' 'and explicit logits computation.') def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None): """Returns prediction and loss for mean squared error regression.""" with ops.name_scope(name, 'mean_squared_error_regressor', [tensor_in, labels]): predictions = nn.xw_plus_b(tensor_in, weights, biases) if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2: predictions = array_ops_.squeeze(predictions, axis=[1]) return predictions, losses.mean_squared_error(labels, predictions) @deprecated('2016-12-01', 'Use `tf.losses.softmax_cross_entropy` ' 'and explicit logits computation.') def 
softmax_classifier(tensor_in, labels, weights, biases, class_weight=None, name=None): """Returns prediction and loss for softmax classifier. This function returns "probabilities" and a cross entropy loss. To obtain predictions, use `tf.argmax` on the returned probabilities. This function requires labels to be passed in one-hot encoding. Args: tensor_in: Input tensor, [batch_size, feature_size], features. labels: Tensor, [batch_size, n_classes], one-hot labels of the output classes. weights: Tensor, [batch_size, feature_size], linear transformation matrix. biases: Tensor, [batch_size], biases. class_weight: Tensor, optional, [n_classes], weight for each class. If not given, all classes are supposed to have weight one. name: Operation name. Returns: `tuple` of softmax predictions and loss `Tensor`s. """ with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]): logits = nn.xw_plus_b(tensor_in, weights, biases) if class_weight is not None: logits = math_ops.multiply(logits, class_weight) return nn.softmax(logits), losses.softmax_cross_entropy(labels, logits)
apache-2.0
Flowdalic/bitcoin
test/functional/wallet_listtransactions.py
2
10588
#!/usr/bin/env python3 # Copyright (c) 2014-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listtransactions API.""" from decimal import Decimal from io import BytesIO from test_framework.messages import COIN, CTransaction from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, bytes_to_hex_str, hex_str_to_bytes, sync_mempools, ) def tx_from_hex(hexstring): tx = CTransaction() f = BytesIO(hex_str_to_bytes(hexstring)) tx.deserialize(f) return tx class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.enable_mocktime() def run_test(self): # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "amount": Decimal("-0.1"), "confirmations": 0}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "amount": Decimal("0.1"), "confirmations": 0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "amount": Decimal("-0.1"), "confirmations": 1}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "amount": Decimal("0.1"), "confirmations": 1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "send"}, {"amount": Decimal("-0.2")}) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "receive"}, {"amount": Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = {self.nodes[0].getnewaddress(): 0.11, 
self.nodes[1].getnewaddress(): 0.22, self.nodes[0].getnewaddress(): 0.33, self.nodes[1].getnewaddress(): 0.44} txid = self.nodes[1].sendmany("", send_to) self.sync_all() assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.11")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.11")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.33")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.33")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.44")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.44")}, {"txid": txid}) pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey'] multisig = self.nodes[1].createmultisig(1, [pubkey]) self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert not [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=False) if "label" in tx and tx["label"] == "watchonly"] txs = [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=True) if "label" in tx and tx['label'] == 'watchonly'] assert_array_result(txs, {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid}) self.run_rbf_opt_in_test() # Check that the opt-in-rbf flag works properly, for sent and received # transactions. 
def run_rbf_opt_in_test(self): # Check whether a transaction signals opt-in RBF itself def is_opt_in(node, txid): rawtx = node.getrawtransaction(txid, 1) for x in rawtx["vin"]: if x["sequence"] < 0xfffffffe: return True return False # Find an unconfirmed output matching a certain txid def get_unconfirmed_utxo_entry(node, txid_to_match): utxo = node.listunspent(0, 0) for i in utxo: if i["txid"] == txid_to_match: return i return None # 1. Chain a few transactions that don't opt-in. txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) assert(not is_opt_in(self.nodes[0], txid_1)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"}) # Tx2 will build off txid_1, still not opting in to RBF. utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1) assert_equal(utxo_to_use["safe"], True) utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) assert_equal(utxo_to_use["safe"], False) # Create tx2 using createrawtransaction inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.999} tx2 = self.nodes[1].createrawtransaction(inputs, outputs) tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"] txid_2 = self.nodes[1].sendrawtransaction(tx2_signed) # ...and check the result assert(not is_opt_in(self.nodes[1], txid_2)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"}) # Tx3 will opt-in to RBF utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}] outputs = {self.nodes[1].getnewaddress(): 0.998} tx3 
= self.nodes[0].createrawtransaction(inputs, outputs) tx3_modified = tx_from_hex(tx3) tx3_modified.vin[0].nSequence = 0 tx3 = bytes_to_hex_str(tx3_modified.serialize()) tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex'] txid_3 = self.nodes[0].sendrawtransaction(tx3_signed) assert(is_opt_in(self.nodes[0], txid_3)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"}) # Tx4 will chain off tx3. Doesn't signal itself, but depends on one # that does. utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3) inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.997} tx4 = self.nodes[1].createrawtransaction(inputs, outputs) tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"] txid_4 = self.nodes[1].sendrawtransaction(tx4_signed) assert(not is_opt_in(self.nodes[1], txid_4)) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"}) sync_mempools(self.nodes) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"}) # Replace tx3, and check that tx4 becomes unknown tx3_b = tx3_modified tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee tx3_b = bytes_to_hex_str(tx3_b.serialize()) tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex'] txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True) assert(is_opt_in(self.nodes[0], txid_3b)) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"}) sync_mempools(self.nodes) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"}) # Check gettransaction as well: for n in self.nodes[0:2]: assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") 
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") # After mining a transaction, it's no longer BIP125-replaceable self.nodes[0].generate(1) assert(txid_3b not in self.nodes[0].getrawmempool()) assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown") if __name__ == '__main__': ListTransactionsTest().main()
mit
mattias-ohlsson/anaconda
pyanaconda/installclass.py
1
11061
# # installclass.py: This is the prototypical class for workstation, server, and # kickstart installs. The interface to BaseInstallClass is *public* -- # ISVs/OEMs can customize the install by creating a new derived type of this # class. # # Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 # Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from distutils.sysconfig import get_python_lib import os, sys, iutil import isys import string import language import imputil import types from constants import * from product import * from storage.partspec import * import gettext _ = lambda x: gettext.ldgettext("anaconda", x) import logging log = logging.getLogger("anaconda") from flags import flags class BaseInstallClass(object): # default to not being hidden hidden = 0 pixmap = None showMinimal = 1 showLoginChoice = 0 _description = "" _descriptionFields = () name = "base" pkgstext = "" # default to showing the upgrade option showUpgrade = True bootloaderTimeoutDefault = None bootloaderExtraArgs = [] _l10n_domain = None # list of of (txt, grplist) tuples for task selection screen tasks = [] # don't select this class by default default = 0 # by default, place this under the "install" category; it gets it's # own toplevel category otherwise parentClass = ( _("Install on System"), "install.png" ) def _get_description(self): return _(self._description) 
% self._descriptionFields description = property(_get_description) @property def l10n_domain(self): if self._l10n_domain is None: raise RuntimeError("Localization domain for '%s' not set." % self.name) return self._l10n_domain def postAction(self, anaconda): anaconda.backend.postAction(anaconda) def setSteps(self, anaconda): dispatch = anaconda.dispatch dispatch.schedule_steps( "language", "keyboard", "filtertype", "filter", "storageinit", "findrootparts", "betanag", "cleardiskssel", "parttype", "autopartitionexecute", "storagedone", "bootloader", "network", "timezone", "accounts", "reposetup", "basepkgsel", "tasksel", "postselection", "reipl", "install", "enablefilesystems", "setuptime", "preinstallconfig", "installpackages", "postinstallconfig", "writeconfig", "firstboot", "instbootloader", "dopostaction", "postscripts", "writeksconfig", "methodcomplete", "copylogs", "setfilecon", "complete" ) if isFinal: dispatch.skip_steps("betanag") if iutil.isEfi() or not iutil.isX86(): dispatch.skip_steps("bootloader") # allow backends to disable interactive package selection if not anaconda.backend.supportsPackageSelection: dispatch.skip_steps("tasksel") dispatch.skip_steps("group-selection") # allow install classes to turn off the upgrade if not self.showUpgrade or not anaconda.backend.supportsUpgrades: dispatch.skip_steps("findrootparts") # 'noupgrade' can be used on the command line to force not looking # for partitions to upgrade. useful in some cases... if flags.cmdline.has_key("noupgrade"): dispatch.skip_steps("findrootparts") # upgrade will also always force looking for an upgrade. if flags.cmdline.has_key("upgrade"): dispatch.request_steps("findrootparts") # allow interface backends to skip certain steps. map(lambda s: dispatch.skip_steps(s), anaconda.intf.unsupported_steps()) # modifies the uri from installmethod.getMethodUri() to take into # account any installclass specific things including multiple base # repositories. 
takes a string or list of strings, returns a dict # with string keys and list values {%repo: %uri_list} def getPackagePaths(self, uri): if not type(uri) == types.ListType: uri = [uri,] return {'base': uri} def setPackageSelection(self, anaconda): pass def setGroupSelection(self, anaconda): grps = anaconda.backend.getDefaultGroups(anaconda) map(lambda x: anaconda.backend.selectGroup(x), grps) def getBackend(self): # this should be overriden in distro install classes from backend import AnacondaBackend return AnacondaBackend def setDefaultPartitioning(self, storage, platform): autorequests = [PartSpec(mountpoint="/", fstype=storage.defaultFSType, size=1024, maxSize=50*1024, grow=True, btr=True, lv=True, encrypted=True), PartSpec(mountpoint="/home", fstype=storage.defaultFSType, size=500, grow=True, requiredSpace=50*1024, btr=True, lv=True, encrypted=True)] bootreq = platform.setDefaultPartitioning() if bootreq: autorequests.extend(bootreq) (minswap, maxswap) = iutil.swapSuggestion() autorequests.append(PartSpec(fstype="swap", size=minswap, maxSize=maxswap, grow=True, lv=True, encrypted=True)) storage.autoPartitionRequests = autorequests def configure(self, anaconda): anaconda.bootloader.timeout = self.bootloaderTimeoutDefault anaconda.bootloader.boot_args.update(self.bootloaderExtraArgs) def versionMatches(self, oldver): pass def productMatches(self, oldprod): pass def productUpgradable(self, arch, oldprod, oldver): """ Return a tuple with: (Upgradable True|False, dict of tests and status) The dict has True|False for: product, version, arch tests. 
""" def archesEq(a, b): import re if re.match("i.86", a) and re.match("i.86", b): return True else: return a == b result = { "product" : self.productMatches(oldprod), "version" : self.versionMatches(oldver), "arch" : archesEq(arch, productArch) } return (all(result.values()), result) def setNetworkOnbootDefault(self, network): pass def __init__(self): pass allClasses = [] allClasses_hidden = [] # returns ( className, classObject, classLogo ) tuples def availableClasses(showHidden=0): global allClasses global allClasses_hidden def _ordering(first, second): ((name1, obj, logo), priority1) = first ((name2, obj, logo), priority2) = second if priority1 < priority2: return -1 elif priority1 > priority2: return 1 if name1 < name2: return -1 elif name1 > name2: return 1 return 0 if not showHidden: if allClasses: return allClasses else: if allClasses_hidden: return allClasses_hidden path = [] for dir in ["installclasses", "/tmp/updates/pyanaconda/installclasses", "/tmp/product/pyanaconda/installclasses", "%s/pyanaconda/installclasses" % get_python_lib(plat_specific=1) ]: if os.access(dir, os.R_OK): path.append(dir) # append the location of installclasses to the python path so we # can import them sys.path = path + sys.path files = [] for p in reversed(path): files += os.listdir(p) done = {} list = [] for file in files: if file[0] == '.': continue if len (file) < 4: continue if file[-3:] != ".py" and file[-4:-1] != ".py": continue mainName = string.split(file, ".")[0] if done.has_key(mainName): continue done[mainName] = 1 try: found = imputil.imp.find_module(mainName) except ImportError as e: log.warning ("module import of %s failed: %s" % (mainName, sys.exc_type)) continue try: loaded = imputil.imp.load_module(mainName, found[0], found[1], found[2]) obj = loaded.InstallClass if obj.__dict__.has_key('sortPriority'): sortOrder = obj.sortPriority else: sortOrder = 0 if obj.__dict__.has_key('arch'): if obj.arch != iutil.getArch(): obj.hidden = 1 if obj.hidden == 0 or showHidden 
== 1: list.append(((obj.name, obj, obj.pixmap), sortOrder)) except ImportError as e: log.warning ("module import of %s failed: %s" % (mainName, sys.exc_type)) if flags.debug: raise else: continue list.sort(_ordering) for (item, priority) in list: if showHidden: allClasses_hidden.append(item) else: allClasses.append(item) if showHidden: return allClasses_hidden else: return allClasses def getBaseInstallClass(): # figure out what installclass we should base on. allavail = availableClasses(showHidden = 1) avail = availableClasses(showHidden = 0) if len(avail) == 1: (cname, cobject, clogo) = avail[0] log.info("using only installclass %s" %(cname,)) elif len(allavail) == 1: (cname, cobject, clogo) = allavail[0] log.info("using only installclass %s" %(cname,)) # Use the highest priority install class if more than one found. elif len(avail) > 1: (cname, cobject, clogo) = avail.pop() log.info('%s is the highest priority installclass, using it' % cname) elif len(allavail) > 1: (cname, cobject, clogo) = allavail.pop() log.info('%s is the highest priority installclass, using it' % cname) # Default to the base installclass if nothing else is found. else: raise RuntimeError, "Unable to find an install class to use!!!" return cobject baseclass = getBaseInstallClass() # we need to be able to differentiate between this and custom class DefaultInstall(baseclass): def __init__(self): baseclass.__init__(self)
gpl-2.0
tkhsu/quick-qemu
scripts/tracetool/format/h.py
98
1039
#!/usr/bin/env python # -*- coding: utf-8 -*- """ trace/generated-tracers.h """ __author__ = "Lluís Vilanova <vilanova@ac.upc.edu>" __copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>" __license__ = "GPL version 2 or (at your option) any later version" __maintainer__ = "Stefan Hajnoczi" __email__ = "stefanha@linux.vnet.ibm.com" from tracetool import out def generate(events, backend): out('/* This file is autogenerated by tracetool, do not edit. */', '', '#ifndef TRACE__GENERATED_TRACERS_H', '#define TRACE__GENERATED_TRACERS_H', '', '#include "qemu-common.h"', '') backend.generate_begin(events) for e in events: out('', 'static inline void %(api)s(%(args)s)', '{', api=e.api(), args=e.args) if "disable" not in e.properties: backend.generate(e) out('}') backend.generate_end(events) out('#endif /* TRACE__GENERATED_TRACERS_H */')
gpl-2.0
kurtrwall/wagtail
wagtail/wagtailimages/tests/test_models.py
2
17102
from __future__ import absolute_import, unicode_literals import unittest from django.contrib.auth import get_user_model from django.contrib.auth.models import Group, Permission from django.core.files.uploadedfile import SimpleUploadedFile from django.core.urlresolvers import reverse from django.db.utils import IntegrityError from django.test import TestCase from django.test.utils import override_settings from willow.image import Image as WillowImage from wagtail.tests.testapp.models import EventPage, EventPageCarouselItem from wagtail.tests.utils import WagtailTestUtils from wagtail.wagtailcore.models import Collection, GroupCollectionPermission, Page from wagtail.wagtailimages.models import Rendition, SourceImageIOError from wagtail.wagtailimages.rect import Rect from .utils import Image, get_test_image_file class TestImage(TestCase): def setUp(self): # Create an image for running tests on self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) def test_is_portrait(self): self.assertFalse(self.image.is_portrait()) def test_is_landscape(self): self.assertTrue(self.image.is_landscape()) def test_get_rect(self): self.assertTrue(self.image.get_rect(), Rect(0, 0, 640, 480)) def test_get_focal_point(self): self.assertEqual(self.image.get_focal_point(), None) # Add a focal point to the image self.image.focal_point_x = 100 self.image.focal_point_y = 200 self.image.focal_point_width = 50 self.image.focal_point_height = 20 # Get it self.assertEqual(self.image.get_focal_point(), Rect(75, 190, 125, 210)) def test_has_focal_point(self): self.assertFalse(self.image.has_focal_point()) # Add a focal point to the image self.image.focal_point_x = 100 self.image.focal_point_y = 200 self.image.focal_point_width = 50 self.image.focal_point_height = 20 self.assertTrue(self.image.has_focal_point()) def test_set_focal_point(self): self.assertEqual(self.image.focal_point_x, None) self.assertEqual(self.image.focal_point_y, None) 
self.assertEqual(self.image.focal_point_width, None) self.assertEqual(self.image.focal_point_height, None) self.image.set_focal_point(Rect(100, 150, 200, 350)) self.assertEqual(self.image.focal_point_x, 150) self.assertEqual(self.image.focal_point_y, 250) self.assertEqual(self.image.focal_point_width, 100) self.assertEqual(self.image.focal_point_height, 200) self.image.set_focal_point(None) self.assertEqual(self.image.focal_point_x, None) self.assertEqual(self.image.focal_point_y, None) self.assertEqual(self.image.focal_point_width, None) self.assertEqual(self.image.focal_point_height, None) def test_is_stored_locally(self): self.assertTrue(self.image.is_stored_locally()) @override_settings(DEFAULT_FILE_STORAGE='wagtail.tests.dummy_external_storage.DummyExternalStorage') def test_is_stored_locally_with_external_storage(self): self.assertFalse(self.image.is_stored_locally()) class TestImageQuerySet(TestCase): def test_search_method(self): # Create an image for running tests on image = Image.objects.create( title="Test image", file=get_test_image_file(), ) # Search for it results = Image.objects.search("Test") self.assertEqual(list(results), [image]) def test_operators(self): aaa_image = Image.objects.create( title="AAA Test image", file=get_test_image_file(), ) zzz_image = Image.objects.create( title="ZZZ Test image", file=get_test_image_file(), ) results = Image.objects.search("aaa test", operator='and') self.assertEqual(list(results), [aaa_image]) results = Image.objects.search("aaa test", operator='or') sorted_results = sorted(results, key=lambda img: img.title) self.assertEqual(sorted_results, [aaa_image, zzz_image]) def test_custom_ordering(self): aaa_image = Image.objects.create( title="AAA Test image", file=get_test_image_file(), ) zzz_image = Image.objects.create( title="ZZZ Test image", file=get_test_image_file(), ) results = Image.objects.order_by('title').search("Test") self.assertEqual(list(results), [aaa_image, zzz_image]) results = 
Image.objects.order_by('-title').search("Test") self.assertEqual(list(results), [zzz_image, aaa_image]) def test_search_indexing_prefetches_tags(self): for i in range(0, 10): image = Image.objects.create( title="Test image %d" % i, file=get_test_image_file(), ) image.tags.add('aardvark', 'artichoke', 'armadillo') with self.assertNumQueries(2): results = { image.title: [tag.name for tag in image.tags.all()] for image in Image.get_indexed_objects() } self.assertTrue('aardvark' in results['Test image 0']) class TestImagePermissions(TestCase): def setUp(self): # Create some user accounts for testing permissions User = get_user_model() self.user = User.objects.create_user(username='user', email='user@email.com', password='password') self.owner = User.objects.create_user(username='owner', email='owner@email.com', password='password') self.editor = User.objects.create_user(username='editor', email='editor@email.com', password='password') self.editor.groups.add(Group.objects.get(name='Editors')) self.administrator = User.objects.create_superuser( username='administrator', email='administrator@email.com', password='password' ) # Owner user must have the add_image permission image_adders_group = Group.objects.create(name="Image adders") GroupCollectionPermission.objects.create( group=image_adders_group, collection=Collection.get_first_root_node(), permission=Permission.objects.get(codename='add_image'), ) self.owner.groups.add(image_adders_group) # Create an image for running tests on self.image = Image.objects.create( title="Test image", uploaded_by_user=self.owner, file=get_test_image_file(), ) def test_administrator_can_edit(self): self.assertTrue(self.image.is_editable_by_user(self.administrator)) def test_editor_can_edit(self): self.assertTrue(self.image.is_editable_by_user(self.editor)) def test_owner_can_edit(self): self.assertTrue(self.image.is_editable_by_user(self.owner)) def test_user_cant_edit(self): self.assertFalse(self.image.is_editable_by_user(self.user)) 
class TestRenditions(TestCase): def setUp(self): # Create an image for running tests on self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) def test_get_rendition_model(self): self.assertIs(Image.get_rendition_model(), Rendition) def test_minification(self): rendition = self.image.get_rendition('width-400') # Check size self.assertEqual(rendition.width, 400) self.assertEqual(rendition.height, 300) def test_resize_to_max(self): rendition = self.image.get_rendition('max-100x100') # Check size self.assertEqual(rendition.width, 100) self.assertEqual(rendition.height, 75) def test_resize_to_min(self): rendition = self.image.get_rendition('min-120x120') # Check size self.assertEqual(rendition.width, 160) self.assertEqual(rendition.height, 120) def test_resize_to_original(self): rendition = self.image.get_rendition('original') # Check size self.assertEqual(rendition.width, 640) self.assertEqual(rendition.height, 480) def test_cache(self): # Get two renditions with the same filter first_rendition = self.image.get_rendition('width-400') second_rendition = self.image.get_rendition('width-400') # Check that they are the same object self.assertEqual(first_rendition, second_rendition) def test_alt_attribute(self): rendition = self.image.get_rendition('width-400') self.assertEqual(rendition.alt, "Test image") class TestUsageCount(TestCase): fixtures = ['test.json'] def setUp(self): self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True) def test_unused_image_usage_count(self): self.assertEqual(self.image.get_usage().count(), 0) @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True) def test_used_image_document_usage_count(self): page = EventPage.objects.get(id=4) event_page_carousel_item = EventPageCarouselItem() event_page_carousel_item.page = page event_page_carousel_item.image = self.image event_page_carousel_item.save() 
self.assertEqual(self.image.get_usage().count(), 1) class TestGetUsage(TestCase): fixtures = ['test.json'] def setUp(self): self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) def test_image_get_usage_not_enabled(self): self.assertEqual(list(self.image.get_usage()), []) @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True) def test_unused_image_get_usage(self): self.assertEqual(list(self.image.get_usage()), []) @override_settings(WAGTAIL_USAGE_COUNT_ENABLED=True) def test_used_image_document_get_usage(self): page = EventPage.objects.get(id=4) event_page_carousel_item = EventPageCarouselItem() event_page_carousel_item.page = page event_page_carousel_item.image = self.image event_page_carousel_item.save() self.assertTrue(issubclass(Page, type(self.image.get_usage()[0]))) class TestGetWillowImage(TestCase): fixtures = ['test.json'] def setUp(self): self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) def test_willow_image_object_returned(self): with self.image.get_willow_image() as willow_image: self.assertIsInstance(willow_image, WillowImage) def test_with_missing_image(self): # Image id=1 in test fixtures has a missing image file bad_image = Image.objects.get(id=1) # Attempting to get the Willow image for images without files # should raise a SourceImageIOError with self.assertRaises(SourceImageIOError): with bad_image.get_willow_image(): self.fail() # Shouldn't get here def test_closes_image(self): # This tests that willow closes images after use with self.image.get_willow_image(): self.assertFalse(self.image.file.closed) self.assertTrue(self.image.file.closed) def test_closes_image_on_exception(self): # This tests that willow closes images when the with is exited with an exception try: with self.image.get_willow_image(): self.assertFalse(self.image.file.closed) raise ValueError("Something went wrong!") except ValueError: pass self.assertTrue(self.image.file.closed) def 
test_doesnt_close_open_image(self): # This tests that when the image file is already open, get_willow_image doesn't close it (#1256) self.image.file.open('rb') with self.image.get_willow_image(): pass self.assertFalse(self.image.file.closed) self.image.file.close() class TestIssue573(TestCase): """ This tests for a bug which causes filename limit on Renditions to be reached when the Image has a long original filename and a big focal point key """ def test_issue_573(self): # Create an image with a big filename and focal point image = Image.objects.create( title="Test image", file=get_test_image_file( 'thisisaverylongfilename-abcdefghijklmnopqrstuvwxyz-supercalifragilisticexpialidocious.png' ), focal_point_x=1000, focal_point_y=1000, focal_point_width=1000, focal_point_height=1000, ) # Try creating a rendition from that image # This would crash if the bug is present image.get_rendition('fill-800x600') @override_settings(_WAGTAILSEARCH_FORCE_AUTO_UPDATE=['elasticsearch']) class TestIssue613(TestCase, WagtailTestUtils): def get_elasticsearch_backend(self): from django.conf import settings from wagtail.wagtailsearch.backends import get_search_backend backend_path = 'wagtail.wagtailsearch.backends.elasticsearch' # Search WAGTAILSEARCH_BACKENDS for an entry that uses the given backend path for backend_name, backend_conf in settings.WAGTAILSEARCH_BACKENDS.items(): if backend_conf['BACKEND'] == backend_path: return get_search_backend(backend_name) else: # no conf entry found - skip tests for this backend raise unittest.SkipTest("No WAGTAILSEARCH_BACKENDS entry for the backend %s" % backend_path) def setUp(self): self.search_backend = self.get_elasticsearch_backend() self.login() def add_image(self, **params): post_data = { 'title': "Test image", 'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()), } post_data.update(params) response = self.client.post(reverse('wagtailimages:add'), post_data) # Should redirect back to index 
self.assertRedirects(response, reverse('wagtailimages:index')) # Check that the image was created images = Image.objects.filter(title="Test image") self.assertEqual(images.count(), 1) # Test that size was populated correctly image = images.first() self.assertEqual(image.width, 640) self.assertEqual(image.height, 480) return image def edit_image(self, **params): # Create an image to edit self.image = Image.objects.create( title="Test image", file=get_test_image_file(), ) # Edit it post_data = { 'title': "Edited", } post_data.update(params) response = self.client.post(reverse('wagtailimages:edit', args=(self.image.id,)), post_data) # Should redirect back to index self.assertRedirects(response, reverse('wagtailimages:index')) # Check that the image was edited image = Image.objects.get(id=self.image.id) self.assertEqual(image.title, "Edited") return image def test_issue_613_on_add(self): # Reset the search index self.search_backend.reset_index() self.search_backend.add_type(Image) # Add an image with some tags image = self.add_image(tags="hello") self.search_backend.refresh_index() # Search for it by tag results = self.search_backend.search("hello", Image) # Check self.assertEqual(len(results), 1) self.assertEqual(results[0].id, image.id) def test_issue_613_on_edit(self): # Reset the search index self.search_backend.reset_index() self.search_backend.add_type(Image) # Add an image with some tags image = self.edit_image(tags="hello") self.search_backend.refresh_index() # Search for it by tag results = self.search_backend.search("hello", Image) # Check self.assertEqual(len(results), 1) self.assertEqual(results[0].id, image.id) class TestIssue312(TestCase): def test_duplicate_renditions(self): # Create an image image = Image.objects.create( title="Test image", file=get_test_image_file(), ) # Get two renditions and check that they're the same rend1 = image.get_rendition('fill-100x100') rend2 = image.get_rendition('fill-100x100') self.assertEqual(rend1, rend2) # Now manually 
duplicate the renditon and check that the database blocks it self.assertRaises( IntegrityError, Rendition.objects.create, image=rend1.image, filter=rend1.filter, width=rend1.width, height=rend1.height, focal_point_key=rend1.focal_point_key, )
bsd-3-clause
rapidhere/rpbtman_autosign
pytz/zoneinfo/NZ.py
9
6854
'''tzinfo timezone information for NZ.''' from pytz.tzinfo import DstTzInfo from pytz.tzinfo import memorized_datetime as d from pytz.tzinfo import memorized_ttinfo as i class NZ(DstTzInfo): '''NZ timezone definition. See datetime.tzinfo for details''' zone = 'NZ' _utc_transition_times = [ d(1,1,1,0,0,0), d(1927,11,5,14,30,0), d(1928,3,3,13,30,0), d(1928,10,13,14,30,0), d(1929,3,16,14,0,0), d(1929,10,12,14,30,0), d(1930,3,15,14,0,0), d(1930,10,11,14,30,0), d(1931,3,14,14,0,0), d(1931,10,10,14,30,0), d(1932,3,19,14,0,0), d(1932,10,8,14,30,0), d(1933,3,18,14,0,0), d(1933,10,7,14,30,0), d(1934,4,28,14,0,0), d(1934,9,29,14,30,0), d(1935,4,27,14,0,0), d(1935,9,28,14,30,0), d(1936,4,25,14,0,0), d(1936,9,26,14,30,0), d(1937,4,24,14,0,0), d(1937,9,25,14,30,0), d(1938,4,23,14,0,0), d(1938,9,24,14,30,0), d(1939,4,29,14,0,0), d(1939,9,23,14,30,0), d(1940,4,27,14,0,0), d(1940,9,28,14,30,0), d(1945,12,31,12,0,0), d(1974,11,2,14,0,0), d(1975,2,22,14,0,0), d(1975,10,25,14,0,0), d(1976,3,6,14,0,0), d(1976,10,30,14,0,0), d(1977,3,5,14,0,0), d(1977,10,29,14,0,0), d(1978,3,4,14,0,0), d(1978,10,28,14,0,0), d(1979,3,3,14,0,0), d(1979,10,27,14,0,0), d(1980,3,1,14,0,0), d(1980,10,25,14,0,0), d(1981,2,28,14,0,0), d(1981,10,24,14,0,0), d(1982,3,6,14,0,0), d(1982,10,30,14,0,0), d(1983,3,5,14,0,0), d(1983,10,29,14,0,0), d(1984,3,3,14,0,0), d(1984,10,27,14,0,0), d(1985,3,2,14,0,0), d(1985,10,26,14,0,0), d(1986,3,1,14,0,0), d(1986,10,25,14,0,0), d(1987,2,28,14,0,0), d(1987,10,24,14,0,0), d(1988,3,5,14,0,0), d(1988,10,29,14,0,0), d(1989,3,4,14,0,0), d(1989,10,7,14,0,0), d(1990,3,17,14,0,0), d(1990,10,6,14,0,0), d(1991,3,16,14,0,0), d(1991,10,5,14,0,0), d(1992,3,14,14,0,0), d(1992,10,3,14,0,0), d(1993,3,20,14,0,0), d(1993,10,2,14,0,0), d(1994,3,19,14,0,0), d(1994,10,1,14,0,0), d(1995,3,18,14,0,0), d(1995,9,30,14,0,0), d(1996,3,16,14,0,0), d(1996,10,5,14,0,0), d(1997,3,15,14,0,0), d(1997,10,4,14,0,0), d(1998,3,14,14,0,0), d(1998,10,3,14,0,0), d(1999,3,20,14,0,0), d(1999,10,2,14,0,0), 
d(2000,3,18,14,0,0), d(2000,9,30,14,0,0), d(2001,3,17,14,0,0), d(2001,10,6,14,0,0), d(2002,3,16,14,0,0), d(2002,10,5,14,0,0), d(2003,3,15,14,0,0), d(2003,10,4,14,0,0), d(2004,3,20,14,0,0), d(2004,10,2,14,0,0), d(2005,3,19,14,0,0), d(2005,10,1,14,0,0), d(2006,3,18,14,0,0), d(2006,9,30,14,0,0), d(2007,3,17,14,0,0), d(2007,10,6,14,0,0), d(2008,3,15,14,0,0), d(2008,10,4,14,0,0), d(2009,3,14,14,0,0), d(2009,10,3,14,0,0), d(2010,3,20,14,0,0), d(2010,10,2,14,0,0), d(2011,3,19,14,0,0), d(2011,10,1,14,0,0), d(2012,3,17,14,0,0), d(2012,10,6,14,0,0), d(2013,3,16,14,0,0), d(2013,10,5,14,0,0), d(2014,3,15,14,0,0), d(2014,10,4,14,0,0), d(2015,3,14,14,0,0), d(2015,10,3,14,0,0), d(2016,3,19,14,0,0), d(2016,10,1,14,0,0), d(2017,3,18,14,0,0), d(2017,9,30,14,0,0), d(2018,3,17,14,0,0), d(2018,10,6,14,0,0), d(2019,3,16,14,0,0), d(2019,10,5,14,0,0), d(2020,3,14,14,0,0), d(2020,10,3,14,0,0), d(2021,3,20,14,0,0), d(2021,10,2,14,0,0), d(2022,3,19,14,0,0), d(2022,10,1,14,0,0), d(2023,3,18,14,0,0), d(2023,9,30,14,0,0), d(2024,3,16,14,0,0), d(2024,10,5,14,0,0), d(2025,3,15,14,0,0), d(2025,10,4,14,0,0), d(2026,3,14,14,0,0), d(2026,10,3,14,0,0), d(2027,3,20,14,0,0), d(2027,10,2,14,0,0), d(2028,3,18,14,0,0), d(2028,9,30,14,0,0), d(2029,3,17,14,0,0), d(2029,10,6,14,0,0), d(2030,3,16,14,0,0), d(2030,10,5,14,0,0), d(2031,3,15,14,0,0), d(2031,10,4,14,0,0), d(2032,3,20,14,0,0), d(2032,10,2,14,0,0), d(2033,3,19,14,0,0), d(2033,10,1,14,0,0), d(2034,3,18,14,0,0), d(2034,9,30,14,0,0), d(2035,3,17,14,0,0), d(2035,10,6,14,0,0), d(2036,3,15,14,0,0), d(2036,10,4,14,0,0), d(2037,3,14,14,0,0), d(2037,10,3,14,0,0), ] _transition_info = [ i(41400,0,'NZMT'), i(45000,3600,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), 
i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(41400,0,'NZMT'), i(43200,1800,'NZST'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), 
i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), i(43200,0,'NZST'), i(46800,3600,'NZDT'), ] NZ = NZ()
gpl-3.0
zhaochao/fuel-web
nailgun/nailgun/api/v1/handlers/logs.py
1
15272
# -*- coding: utf-8 -*- # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handlers dealing with logs """ from itertools import dropwhile import logging import os import re import time from oslo.serialization import jsonutils import web from nailgun import consts from nailgun import objects from nailgun.api.v1.handlers.base import BaseHandler from nailgun.api.v1.handlers.base import content from nailgun.settings import settings from nailgun.task.manager import DumpTaskManager from nailgun.task.task import DumpTask logger = logging.getLogger(__name__) def read_backwards(file, from_byte=None, bufsize=0x20000): cache_pos = file.tell() file.seek(0, os.SEEK_END) size = file.tell() file.seek(cache_pos, os.SEEK_SET) if size == 0: return if from_byte is None: from_byte = size lines = [''] read_size = bufsize rem = from_byte % bufsize if rem == 0: # Perform bufsize reads only pos = max(0, (from_byte // bufsize - 1) * bufsize) else: # One more iteration will be done to read rem bytes so that we # are aligned to exactly bufsize reads later on read_size = rem pos = (from_byte // bufsize) * bufsize while pos >= 0: file.seek(pos, os.SEEK_SET) data = file.read(read_size) + lines[0] lines = re.findall('[^\n]*\n?', data) ix = len(lines) - 2 while ix > 0: yield lines[ix] ix -= 1 pos -= bufsize read_size = bufsize else: yield lines[0] # Set cursor position to last read byte try: file.seek(max(0, pos), os.SEEK_SET) except IOError: pass # It turns out that 
strftime/strptime are costly functions in Python # http://stackoverflow.com/questions/13468126/a-faster-strptime # We don't call them if the log and UI date formats aren't very different STRPTIME_PERFORMANCE_HACK = {} if settings.UI_LOG_DATE_FORMAT == '%Y-%m-%d %H:%M:%S': STRPTIME_PERFORMANCE_HACK = { '%Y-%m-%dT%H:%M:%S': lambda date: date.replace('T', ' '), '%Y-%m-%d %H:%M:%S': lambda date: date, } def read_log( log_file=None, level=None, log_config={}, max_entries=None, regexp=None, from_byte=-1, fetch_older=False, to_byte=0, **kwargs): has_more = False entries = [] log_date_format = log_config['date_format'] multiline = log_config.get('multiline', False) skip_regexp = None if 'skip_regexp' in log_config: skip_regexp = re.compile(log_config['skip_regexp']) allowed_levels = log_config['levels'] if level: allowed_levels = list(dropwhile(lambda l: l != level, log_config['levels'])) log_file_size = os.stat(log_file).st_size if log_date_format in STRPTIME_PERFORMANCE_HACK: strptime_function = STRPTIME_PERFORMANCE_HACK[log_date_format] else: strptime_function = lambda date: time.strftime( settings.UI_LOG_DATE_FORMAT, time.strptime(date, log_date_format) ) with open(log_file, 'r') as f: # we need to calculate current position manually instead of using # tell() because read_backwards uses buffering f.seek(0, os.SEEK_END) pos = f.tell() if from_byte != -1 and fetch_older: pos = from_byte multilinebuf = [] for line in read_backwards(f, from_byte=pos): pos -= len(line) if not fetch_older and pos < to_byte: has_more = pos > 0 break entry = line.rstrip('\n') if not len(entry): continue if skip_regexp and skip_regexp.match(entry): continue m = regexp.match(entry) if m is None: if multiline: # Add next multiline part to last entry if it exist. 
multilinebuf.append(entry) else: logger.debug("Unable to parse log entry '%s' from %s", entry, log_file) continue entry_text = m.group('text') if len(multilinebuf): multilinebuf.reverse() entry_text += '\n' + '\n'.join(multilinebuf) multilinebuf = [] entry_level = m.group('level').upper() or 'INFO' if level and not (entry_level in allowed_levels): continue try: entry_date = strptime_function(m.group('date')) except ValueError: logger.debug("Unable to parse date from log entry." " Date format: %r, date part of entry: %r", log_date_format, m.group('date')) continue entries.append([ entry_date, entry_level, entry_text ]) if len(entries) >= max_entries: has_more = True break if fetch_older or (not fetch_older and from_byte == -1): from_byte = pos if from_byte == 0: has_more = False return { 'entries': entries, 'from': from_byte, 'to': log_file_size, 'has_more': has_more, } class LogEntryCollectionHandler(BaseHandler): """Log entry collection handler """ @content def GET(self): """Receives following parameters: - *date_before* - get logs before this date - *date_after* - get logs after this date - *source* - source of logs - *node* - node id (for getting node logs) - *level* - log level (all levels showed by default) - *to* - number of entries - *max_entries* - max number of entries to load :returns: Collection of log entries, log file size and if there are new entries. 
:http: * 200 (OK) * 400 (invalid *date_before* value) * 400 (invalid *date_after* value) * 400 (invalid *source* value) * 400 (invalid *node* value) * 400 (invalid *level* value) * 400 (invalid *to* value) * 400 (invalid *max_entries* value) * 404 (log file not found) * 404 (log files dir not found) * 404 (node not found) * 500 (node has no assigned ip) * 500 (invalid regular expression in config) """ data = self.read_and_validate_data() log_file = data['log_file'] fetch_older = data['fetch_older'] from_byte = data['from_byte'] to_byte = data['to_byte'] log_file_size = os.stat(log_file).st_size if (not fetch_older and to_byte >= log_file_size) or \ (fetch_older and from_byte == 0): return jsonutils.dumps({ 'entries': [], 'from': from_byte, 'to': log_file_size, 'has_more': False, }) return read_log(**data) def read_and_validate_data(self): user_data = web.input() if not user_data.get('source'): logger.debug("'source' must be specified") raise self.http(400, "'source' must be specified") try: max_entries = int(user_data.get('max_entries', settings.TRUNCATE_LOG_ENTRIES)) except ValueError: logger.debug("Invalid 'max_entries' value: %r", user_data.get('max_entries')) raise self.http(400, "Invalid 'max_entries' value") from_byte = None try: from_byte = int(user_data.get('from', -1)) except ValueError: logger.debug("Invalid 'from' value: %r", user_data.get('from')) raise self.http(400, "Invalid 'from' value") to_byte = None try: to_byte = int(user_data.get('to', 0)) except ValueError: logger.debug("Invalid 'to' value: %r", user_data.get('to')) raise self.http(400, "Invalid 'to' value") fetch_older = 'fetch_older' in user_data and \ user_data['fetch_older'].lower() in ('1', 'true') date_before = user_data.get('date_before') if date_before: try: date_before = time.strptime(date_before, settings.UI_LOG_DATE_FORMAT) except ValueError: logger.debug("Invalid 'date_before' value: %r", date_before) raise self.http(400, "Invalid 'date_before' value") date_after = 
user_data.get('date_after') if date_after: try: date_after = time.strptime(date_after, settings.UI_LOG_DATE_FORMAT) except ValueError: logger.debug("Invalid 'date_after' value: %r", date_after) raise self.http(400, "Invalid 'date_after' value") log_config = filter(lambda lc: lc['id'] == user_data.get('source'), settings.LOGS) # If log source not found or it is fake source but we are run without # fake tasks. if not log_config or (log_config[0].get('fake') and not settings.FAKE_TASKS): logger.debug("Log source %r not found", user_data.get('source')) raise self.http(404, "Log source not found") log_config = log_config[0] # If it is 'remote' and not 'fake' log source then calculate log file # path by base dir, node IP and relative path to file. # Otherwise return absolute path. node = None if log_config['remote'] and not log_config.get('fake'): if not user_data.get('node'): raise self.http(400, "'node' must be specified") node = objects.Node.get_by_uid(user_data.get('node')) if not node: raise self.http(404, "Node not found") if not node.ip: logger.error('Node %r has no assigned ip', node.id) raise self.http(500, "Node has no assigned ip") if node.status == consts.NODE_STATUSES.discover: ndir = node.ip else: ndir = node.fqdn remote_log_dir = os.path.join(log_config['base'], ndir) if not os.path.exists(remote_log_dir): logger.debug("Log files dir %r for node %s not found", remote_log_dir, node.id) raise self.http(404, "Log files dir for node not found") log_file = os.path.join(remote_log_dir, log_config['path']) else: log_file = log_config['path'] if not os.path.exists(log_file): if node: logger.debug("Log file %r for node %s not found", log_file, node.id) else: logger.debug("Log file %r not found", log_file) raise self.http(404, "Log file not found") level = user_data.get('level') if level is not None and level not in log_config['levels']: raise self.http(400, "Invalid level") try: regexp = re.compile(log_config['regexp']) except re.error: logger.exception('Invalid 
regular expression for file %r', log_config['id']) raise self.http(500, "Invalid regular expression in config") if 'skip_regexp' in log_config: try: re.compile(log_config['skip_regexp']) except re.error: logger.exception('Invalid regular expression for file %r', log_config['id']) raise self.http(500, "Invalid regular expression in config") return { 'date_after': date_after, 'date_before': date_before, 'level': level, 'log_file': log_file, 'log_config': log_config, 'max_entries': max_entries, 'node': node, 'regexp': regexp, 'fetch_older': fetch_older, 'from_byte': from_byte, 'to_byte': to_byte, } class LogPackageHandler(BaseHandler): """Log package handler """ @content def PUT(self): """:returns: JSONized Task object. :http: * 200 (task successfully executed) * 400 (data validation failed) * 404 (cluster not found in db) """ try: conf = jsonutils.loads(web.data()) if web.data() else None task_manager = DumpTaskManager() task = task_manager.execute(conf=conf) except Exception as exc: logger.warn(u'DumpTask: error while execution ' 'dump environment task: {0}'.format(str(exc))) raise self.http(400, str(exc)) self.raise_task(task) class LogPackageDefaultConfig(BaseHandler): @content def GET(self): """Generates default config for snapshot :http: * 200 """ return DumpTask.conf() class LogSourceCollectionHandler(BaseHandler): """Log source collection handler """ @content def GET(self): """:returns: Collection of log sources (from settings) :http: * 200 (OK) """ return settings.LOGS class LogSourceByNodeCollectionHandler(BaseHandler): """Log source by node collection handler """ @content def GET(self, node_id): """:returns: Collection of log sources by node (from settings) :http: * 200 (OK) * 404 (node not found in db) """ node = self.get_object_or_404(objects.Node, node_id) def getpath(x): if x.get('fake'): if settings.FAKE_TASKS: return x['path'] else: return '' else: if node.status == consts.NODE_STATUSES.discover: ndir = node.ip else: ndir = node.fqdn return 
os.path.join(x['base'], ndir, x['path']) f = lambda x: ( x.get('remote') and x.get('path') and x.get('base') and os.access(getpath(x), os.R_OK) and os.path.isfile(getpath(x)) ) sources = filter(f, settings.LOGS) return sources
apache-2.0
SickGear/SickGear
lib/hachoir_py2/parser/archive/bzip2_parser.py
2
9443
""" BZIP2 archive file Author: Victor Stinner, Robert Xiao """ from hachoir_py2.parser import Parser from hachoir_py2.core.tools import paddingSize from hachoir_py2.field import (Field, FieldSet, GenericVector, ParserError, String, PaddingBits, Bit, Bits, Character, UInt32, Enum, CompressedField) from hachoir_py2.core.endian import BIG_ENDIAN from hachoir_py2.core.text_handler import textHandler, hexadecimal from hachoir_py2.parser.archive.zlib import build_tree, HuffmanCode try: from bz2 import BZ2Decompressor class Bunzip2: def __init__(self, stream): self.bzip2 = BZ2Decompressor() def __call__(self, size, data=''): try: return self.bzip2.decompress(data) except EOFError: return '' has_deflate = True except ImportError: has_deflate = False class ZeroTerminatedNumber(Field): """Zero (bit) terminated number: e.g. 11110 is 4.""" def __init__(self, parent, name, description=None): Field.__init__(self, parent, name, 0, description) endian = self.parent.endian stream = self.parent.stream addr = self.absolute_address value = 0 while True: bit = stream.readBits(addr, 1, endian) addr += 1 self._size += 1 if not bit: break value += 1 self._value = value def createValue(self): return self._value def move_to_front(seq, index): seq[:] = seq[index:index + 1] + seq[0:index] + seq[index + 1:] class Bzip2Bitmap(FieldSet): def __init__(self, parent, name, nb_items, start_index, *args, **kwargs): FieldSet.__init__(self, parent, name, *args, **kwargs) self.nb_items = nb_items self.start_index = start_index def createFields(self): for i in xrange(self.start_index, self.start_index + self.nb_items): yield Bit(self, "symbol_used[%i]" % i, "Is the symbol %i (%r) used?" 
% (i, chr(i))) class Bzip2Lengths(FieldSet): def __init__(self, parent, name, symbols, *args, **kwargs): FieldSet.__init__(self, parent, name, *args, **kwargs) self.symbols = symbols def createFields(self): yield Bits(self, "start_length", 5) length = self["start_length"].value lengths = [] for i in xrange(self.symbols): while True: bit = Bit(self, "change_length[%i][]" % i, "Should the length be changed for symbol %i?" % i) yield bit if not bit.value: break else: bit = Enum(Bit(self, "length_decrement[%i][]" % i, "Decrement the value?"), {True: "Decrement", False: "Increment"}) yield bit if bit.value: length -= 1 else: length += 1 lengths.append(length) self.final_length = length self.tree = build_tree(lengths) class Bzip2Selectors(FieldSet): def __init__(self, parent, name, ngroups, *args, **kwargs): FieldSet.__init__(self, parent, name, *args, **kwargs) self.groups = range(ngroups) def createFields(self): for i in xrange(self["../selectors_used"].value): field = ZeroTerminatedNumber(self, "selector_list[]") move_to_front(self.groups, field.value) field.realvalue = self.groups[0] field._description = "MTF'ed selector index: raw value %i, real value %i" % (field.value, field.realvalue) yield field class Bzip2Block(FieldSet): def createFields(self): yield textHandler(Bits(self, "blockheader", 48, "Block header"), hexadecimal) if self["blockheader"].value != 0x314159265359: # pi raise ParserError("Invalid block header!") yield textHandler(UInt32(self, "crc32", "CRC32 for this block"), hexadecimal) yield Bit(self, "randomized", "Is this block randomized?") yield Bits(self, "orig_bwt_pointer", 24, "Starting pointer into BWT after untransform") yield GenericVector(self, "huffman_used_map", 16, Bit, 'block_used', "Bitmap showing which blocks (representing 16 literals each) are in use") symbols_used = [] for index, block_used in enumerate(self["huffman_used_map"].array('block_used')): if block_used.value: start_index = index * 16 field = Bzip2Bitmap(self, 
"huffman_used_bitmap[%i]" % index, 16, start_index, "Bitmap for block %i (literals %i to %i) showing which symbols are in use" % ( index, start_index, start_index + 15)) yield field for i, used in enumerate(field): if used.value: symbols_used.append(start_index + i) yield Bits(self, "huffman_groups", 3, "Number of different Huffman tables in use") yield Bits(self, "selectors_used", 15, "Number of times the Huffman tables are switched") yield Bzip2Selectors(self, "selectors_list", self["huffman_groups"].value) trees = [] for group in xrange(self["huffman_groups"].value): field = Bzip2Lengths(self, "huffman_lengths[]", len(symbols_used) + 2) yield field trees.append(field.tree) counter = 0 rle_run = 0 selector_tree = None while True: if counter % 50 == 0: select_id = self["selectors_list"].array("selector_list")[counter // 50].realvalue selector_tree = trees[select_id] field = HuffmanCode(self, "huffman_code[]", selector_tree) if field.realvalue in [0, 1]: # RLE codes if rle_run == 0: rle_power = 1 rle_run += (field.realvalue + 1) * rle_power rle_power <<= 1 field._description = "RLE Run Code %i (for %r); Total accumulated run %i (Huffman Code %i)" % ( field.realvalue, chr(symbols_used[0]), rle_run, field.value) elif field.realvalue == len(symbols_used) + 1: field._description = "Block Terminator (%i) (Huffman Code %i)" % (field.realvalue, field.value) yield field break else: rle_run = 0 move_to_front(symbols_used, field.realvalue - 1) field._description = "Literal %r (value %i) (Huffman Code %i)" % ( chr(symbols_used[0]), field.realvalue, field.value) yield field if field.realvalue == len(symbols_used) + 1: break counter += 1 class Bzip2Stream(FieldSet): START_BLOCK = 0x314159265359 # pi END_STREAM = 0x177245385090 # sqrt(pi) def createFields(self): end = False while not end: marker = self.stream.readBits(self.absolute_address + self.current_size, 48, self.endian) if marker == self.START_BLOCK: yield Bzip2Block(self, "block[]") elif marker == self.END_STREAM: yield 
textHandler(Bits(self, "stream_end", 48, "End-of-stream marker"), hexadecimal) yield textHandler(UInt32(self, "crc32", "CRC32 for entire stream"), hexadecimal) padding = paddingSize(self.current_size, 8) if padding: yield PaddingBits(self, "padding[]", padding) end = True else: raise ParserError("Invalid marker 0x%02X!" % marker) class Bzip2Parser(Parser): PARSER_TAGS = { "id": "bzip2", "category": "archive", "file_ext": ("bz2",), "mime": (u"application/x-bzip2",), "min_size": 10 * 8, "magic": (('BZh', 0),), "description": "bzip2 archive" } endian = BIG_ENDIAN def validate(self): if self.stream.readBytes(0, 3) != 'BZh': return "Wrong file signature" if not ("1" <= self["blocksize"].value <= "9"): return "Wrong blocksize" return True def createFields(self): yield String(self, "id", 3, "Identifier (BZh)", charset="ASCII") yield Character(self, "blocksize", "Block size (KB of memory needed to uncompress)") if self._size is None: # TODO: is it possible to handle piped input? raise NotImplementedError size = (self._size - self.current_size) // 8 if size: for tag, filename in self.stream.tags: if tag == "filename" and filename.endswith(".bz2"): filename = filename[:-4] break else: filename = None data = Bzip2Stream(self, "file", size=size * 8) if has_deflate: CompressedField(self, Bunzip2) def createInputStream(**args): if filename: args.setdefault("tags", []).append(("filename", filename)) return self._createInputStream(**args) data._createInputStream = createInputStream yield data
gpl-3.0
sanjeevtripurari/hue
apps/beeswax/src/beeswax/migrations/0008_auto__add_field_queryhistory_query_type.py
37
7995
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): needed_by = ( ("desktop", "0007_auto__add_documentpermission__add_documenttag__add_document"), ) def forwards(self, orm): # Adding field 'QueryHistory.query_type' db.add_column('beeswax_queryhistory', 'query_type', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'QueryHistory.query_type' db.delete_column('beeswax_queryhistory', 'query_type') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': 
('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'beeswax.metainstall': { 'Meta': {'object_name': 'MetaInstall'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'installed_example': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'beeswax.queryhistory': { 'Meta': {'object_name': 'QueryHistory'}, 'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beeswax.SavedQuery']", 'null': 'True'}), 'has_results': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_state': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'log_context': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'modified_row_count': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'operation_type': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'query': ('django.db.models.fields.TextField', [], {}), 
'query_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'server_guid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '1024', 'null': 'True'}), 'server_host': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'server_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}), 'server_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}), 'server_port': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'server_type': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}), 'statement_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'beeswax.savedquery': { 'Meta': {'object_name': 'SavedQuery'}, 'data': ('django.db.models.fields.TextField', [], {'max_length': '65536'}), 'desc': ('django.db.models.fields.TextField', [], {'max_length': '1024'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}), 'mtime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'type': ('django.db.models.fields.IntegerField', [], {}) }, 'beeswax.session': { 'Meta': {'object_name': 'Session'}, 'application': ('django.db.models.fields.CharField', [], {'default': "'beeswax'", 'max_length': '128'}), 'guid': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_used': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'secret': ('django.db.models.fields.TextField', [], {'max_length': "'100'"}), 'server_protocol_version': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'status_code': ('django.db.models.fields.PositiveSmallIntegerField', [], {}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['beeswax']
apache-2.0
kelvinhammond/beets
test/test_replaygain.py
2
5021
# -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2015, Thomas Scholtes # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. from __future__ import (division, absolute_import, print_function, unicode_literals) from test._common import unittest from test.helper import TestHelper, has_program from beets.mediafile import MediaFile try: import gi gi.require_version('Gst', '1.0') GST_AVAILABLE = True except (ImportError, ValueError): GST_AVAILABLE = False if any(has_program(cmd, ['-v']) for cmd in ['mp3gain', 'aacgain']): GAIN_PROG_AVAILABLE = True else: GAIN_PROG_AVAILABLE = False if has_program('bs1770gain', ['--replaygain']): LOUDNESS_PROG_AVAILABLE = True else: LOUDNESS_PROG_AVAILABLE = False class ReplayGainCliTestBase(TestHelper): def setUp(self): self.setup_beets() try: self.load_plugins('replaygain') except: import sys # store exception info so an error in teardown does not swallow it exc_info = sys.exc_info() try: self.teardown_beets() self.unload_plugins() except: # if load_plugins() failed then setup is incomplete and # teardown operations may fail. 
In particular # {Item,Album} # may not have the _original_types attribute in unload_plugins pass raise exc_info[1], None, exc_info[2] self.config['replaygain']['backend'] = self.backend album = self.add_album_fixture(2) for item in album.items(): self._reset_replaygain(item) def tearDown(self): self.teardown_beets() self.unload_plugins() def _reset_replaygain(self, item): item['rg_track_peak'] = None item['rg_track_gain'] = None item['rg_album_gain'] = None item['rg_album_gain'] = None item.write() item.store() def test_cli_saves_track_gain(self): for item in self.lib.items(): self.assertIsNone(item.rg_track_peak) self.assertIsNone(item.rg_track_gain) mediafile = MediaFile(item.path) self.assertIsNone(mediafile.rg_track_peak) self.assertIsNone(mediafile.rg_track_gain) self.run_command('replaygain') for item in self.lib.items(): self.assertIsNotNone(item.rg_track_peak) self.assertIsNotNone(item.rg_track_gain) mediafile = MediaFile(item.path) self.assertAlmostEqual( mediafile.rg_track_peak, item.rg_track_peak, places=6) self.assertAlmostEqual( mediafile.rg_track_gain, item.rg_track_gain, places=2) def test_cli_skips_calculated_tracks(self): self.run_command('replaygain') item = self.lib.items()[0] peak = item.rg_track_peak item.rg_track_gain = 0.0 self.run_command('replaygain') self.assertEqual(item.rg_track_gain, 0.0) self.assertEqual(item.rg_track_peak, peak) def test_cli_saves_album_gain_to_file(self): for item in self.lib.items(): mediafile = MediaFile(item.path) self.assertIsNone(mediafile.rg_album_peak) self.assertIsNone(mediafile.rg_album_gain) self.run_command('replaygain', '-a') peaks = [] gains = [] for item in self.lib.items(): mediafile = MediaFile(item.path) peaks.append(mediafile.rg_album_peak) gains.append(mediafile.rg_album_gain) # Make sure they are all the same self.assertEqual(max(peaks), min(peaks)) self.assertEqual(max(gains), min(gains)) self.assertNotEqual(max(gains), 0.0) self.assertNotEqual(max(peaks), 0.0) @unittest.skipIf(not GST_AVAILABLE, 
'gstreamer cannot be found') class ReplayGainGstCliTest(ReplayGainCliTestBase, unittest.TestCase): backend = u'gstreamer' @unittest.skipIf(not GAIN_PROG_AVAILABLE, 'no *gain command found') class ReplayGainCmdCliTest(ReplayGainCliTestBase, unittest.TestCase): backend = u'command' @unittest.skipIf(not LOUDNESS_PROG_AVAILABLE, 'bs1770gain cannot be found') class ReplayGainLdnsCliTest(ReplayGainCliTestBase, unittest.TestCase): backend = u'bs1770gain' def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == b'__main__': unittest.main(defaultTest='suite')
mit
dendisuhubdy/tensorflow
tensorflow/contrib/receptive_field/python/util/parse_layer_parameters_test.py
44
6026
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for parse_layer_parameters module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib import slim from tensorflow.contrib.receptive_field.python.util import graph_compute_order from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import nn from tensorflow.python.platform import test def create_test_network(): """Convolutional neural network for test. Returns: name_to_node: Dict keyed by node name, each entry containing the node's NodeDef. """ g = ops.Graph() with g.as_default(): # An input test image with unknown spatial resolution. x = array_ops.placeholder( dtypes.float32, (None, None, None, 1), name='input_image') # Left branch before first addition. l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID') # Right branch before first addition. 
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad') l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID') l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME') # First addition. l4 = nn.relu(l1 + l3, name='L4_relu') # Left branch after first addition. l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME') # Right branch after first addition. l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME') # Final addition. gen_math_ops.add(l5, l6, name='L7_add') name_to_node = graph_compute_order.parse_graph_nodes(g.as_graph_def()) return name_to_node class ParseLayerParametersTest(test.TestCase): def testParametersAreParsedCorrectly(self): """Checks parameters from create_test_network() are parsed correctly.""" name_to_node = create_test_network() # L1. l1_node_name = 'L1/Conv2D' l1_params = parse_layer_parameters.get_layer_params( name_to_node[l1_node_name], name_to_node) expected_l1_params = (1, 1, 4, 4, 0, 0, 0, 0) self.assertEqual(l1_params, expected_l1_params) # L2 padding. l2_pad_name = 'L2_pad' l2_pad_params = parse_layer_parameters.get_layer_params( name_to_node[l2_pad_name], name_to_node) expected_l2_pad_params = (1, 1, 1, 1, 1, 1, 1, 1) self.assertEqual(l2_pad_params, expected_l2_pad_params) # L2. l2_node_name = 'L2/Conv2D' l2_params = parse_layer_parameters.get_layer_params( name_to_node[l2_node_name], name_to_node) expected_l2_params = (3, 3, 2, 2, 0, 0, 0, 0) self.assertEqual(l2_params, expected_l2_params) # L3. l3_node_name = 'L3/MaxPool' # - Without knowing input size. l3_params = parse_layer_parameters.get_layer_params( name_to_node[l3_node_name], name_to_node) expected_l3_params = (3, 3, 2, 2, None, None, None, None) self.assertEqual(l3_params, expected_l3_params) # - Input size is even. 
l3_even_params = parse_layer_parameters.get_layer_params( name_to_node[l3_node_name], name_to_node, input_resolution=[4, 4]) expected_l3_even_params = (3, 3, 2, 2, 0, 0, 1, 1) self.assertEqual(l3_even_params, expected_l3_even_params) # - Input size is odd. l3_odd_params = parse_layer_parameters.get_layer_params( name_to_node[l3_node_name], name_to_node, input_resolution=[5, 5]) expected_l3_odd_params = (3, 3, 2, 2, 1, 1, 2, 2) self.assertEqual(l3_odd_params, expected_l3_odd_params) # L4. l4_node_name = 'L4_relu' l4_params = parse_layer_parameters.get_layer_params( name_to_node[l4_node_name], name_to_node) expected_l4_params = (1, 1, 1, 1, 0, 0, 0, 0) self.assertEqual(l4_params, expected_l4_params) # L5. l5_node_name = 'L5/Conv2D' l5_params = parse_layer_parameters.get_layer_params( name_to_node[l5_node_name], name_to_node) expected_l5_params = (1, 1, 2, 2, 0, 0, 0, 0) self.assertEqual(l5_params, expected_l5_params) # L6. l6_node_name = 'L6/Conv2D' # - Without knowing input size. l6_params = parse_layer_parameters.get_layer_params( name_to_node[l6_node_name], name_to_node) expected_l6_params = (3, 3, 2, 2, None, None, None, None) self.assertEqual(l6_params, expected_l6_params) # - Input size is even. l6_even_params = parse_layer_parameters.get_layer_params( name_to_node[l6_node_name], name_to_node, input_resolution=[4, 4]) expected_l6_even_params = (3, 3, 2, 2, 0, 0, 1, 1) self.assertEqual(l6_even_params, expected_l6_even_params) # - Input size is odd. l6_odd_params = parse_layer_parameters.get_layer_params( name_to_node[l6_node_name], name_to_node, input_resolution=[5, 5]) expected_l6_odd_params = (3, 3, 2, 2, 1, 1, 2, 2) self.assertEqual(l6_odd_params, expected_l6_odd_params) # L7. l7_node_name = 'L7_add' l7_params = parse_layer_parameters.get_layer_params( name_to_node[l7_node_name], name_to_node) expected_l7_params = (1, 1, 1, 1, 0, 0, 0, 0) self.assertEqual(l7_params, expected_l7_params) if __name__ == '__main__': test.main()
apache-2.0
rsoulliere/Evergreen_Mohawk
build/i18n/tests/testSQL.py
11
1833
#!/usr/bin/env python # # Perform the following tests: # 1. Generate a POT file from a set of marked SQL statements # 2. Generate an SQL file from a translated PO file import filecmp import os import subprocess import testhelper import unittest class TestSQLFramework(unittest.TestCase): basedir = os.path.dirname(__file__) script = os.path.join(basedir, '../scripts/db-seed-i18n.py') tmpdirs = [(os.path.join(basedir, 'tmp/'))] sqlsource = os.path.join(basedir, 'data/sqlsource.sql') canonpot = os.path.join(basedir, 'data/sql2pot.pot') canonpo = os.path.join(basedir, 'data/sqlsource.po') testpot = os.path.join(basedir, 'tmp/sql2pot.pot') canonsql = os.path.join(basedir, 'data/po2sql.sql') testsql = os.path.join(basedir, 'tmp/testi18n.sql') def setUp(self): testhelper.setUp(self) def tearDown(self): testhelper.tearDown(self) def testgenpot(self): """ Create a POT file from our test SQL statements. """ subprocess.Popen( ('python', self.script, '--pot', self.sqlsource, '--output', self.testpot), 0, None, None).wait() # avoid basic timestamp conflicts testhelper.mungepothead(self.testpot) testhelper.mungepothead(self.canonpot) self.assertEqual(filecmp.cmp(self.canonpot, self.testpot), 1) def testgensql(self): """ Create a SQL file from a translated PO file. """ devnull = open('/dev/null', 'w') subprocess.Popen( ('python', self.script, '--sql', self.canonpo, '--locale', 'zz-ZZ', '--output', self.testsql), 0, None, None, devnull, devnull).wait() self.assertEqual(filecmp.cmp(self.canonsql, self.testsql), 1) if __name__ == '__main__': unittest.main()
gpl-2.0
mastizada/kuma
vendor/packages/sqlalchemy/test/dialect/test_sqlite.py
7
22296
"""SQLite-specific tests.""" from sqlalchemy.test.testing import eq_, assert_raises, \ assert_raises_message import datetime from sqlalchemy import * from sqlalchemy import exc, sql, schema from sqlalchemy.dialects.sqlite import base as sqlite, \ pysqlite as pysqlite_dialect from sqlalchemy.test import * class TestTypes(TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' def test_boolean(self): """Test that the boolean only treats 1 as True """ meta = MetaData(testing.db) t = Table('bool_table', meta, Column('id', Integer, primary_key=True), Column('boo', Boolean(create_constraint=False))) try: meta.create_all() testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (1, 'false');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (2, 'true');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (3, '1');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (4, '0');") testing.db.execute('INSERT INTO bool_table (id, boo) ' 'VALUES (5, 1);') testing.db.execute('INSERT INTO bool_table (id, boo) ' 'VALUES (6, 0);') eq_(t.select(t.c.boo).order_by(t.c.id).execute().fetchall(), [(3, True), (5, True)]) finally: meta.drop_all() def test_string_dates_raise(self): assert_raises(TypeError, testing.db.execute, select([1]).where(bindparam('date', type_=Date)), date=str(datetime.date(2007, 10, 30))) def test_time_microseconds(self): dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125, ) eq_(str(dt), '2008-06-27 12:00:00.000125') sldt = sqlite.DATETIME() bp = sldt.bind_processor(None) eq_(bp(dt), '2008-06-27 12:00:00.000125') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) def test_native_datetime(self): dbapi = testing.db.dialect.dbapi connect_args = {'detect_types': dbapi.PARSE_DECLTYPES \ | dbapi.PARSE_COLNAMES} engine = engines.testing_engine(options={'connect_args' : connect_args, 'native_datetime': True}) t = Table('datetest', MetaData(), Column('id', Integer, primary_key=True), Column('d1', Date), Column('d2', 
TIMESTAMP)) t.create(engine) try: engine.execute(t.insert(), {'d1': datetime.date(2010, 5, 10), 'd2': datetime.datetime( 2010, 5, 10, 12, 15, 25, )}) row = engine.execute(t.select()).first() eq_(row, (1, datetime.date(2010, 5, 10), datetime.datetime( 2010, 5, 10, 12, 15, 25, ))) r = engine.execute(func.current_date()).scalar() assert isinstance(r, basestring) finally: t.drop(engine) engine.dispose() def test_no_convert_unicode(self): """test no utf-8 encoding occurs""" dialect = sqlite.dialect() for t in ( String(convert_unicode=True), CHAR(convert_unicode=True), Unicode(), UnicodeText(), String(convert_unicode=True), CHAR(convert_unicode=True), Unicode(), UnicodeText(), ): bindproc = t.dialect_impl(dialect).bind_processor(dialect) assert not bindproc or isinstance(bindproc(u'some string'), unicode) def test_type_reflection(self): # (ask_for, roundtripped_as_if_different) specs = [ (String(), String()), (String(1), String(1)), (String(3), String(3)), (Text(), Text()), (Unicode(), String()), (Unicode(1), String(1)), (Unicode(3), String(3)), (UnicodeText(), Text()), (CHAR(1), ), (CHAR(3), CHAR(3)), (NUMERIC, NUMERIC()), (NUMERIC(10, 2), NUMERIC(10, 2)), (Numeric, NUMERIC()), (Numeric(10, 2), NUMERIC(10, 2)), (DECIMAL, DECIMAL()), (DECIMAL(10, 2), DECIMAL(10, 2)), (Float, Float()), (NUMERIC(), ), (TIMESTAMP, TIMESTAMP()), (DATETIME, DATETIME()), (DateTime, DateTime()), (DateTime(), ), (DATE, DATE()), (Date, Date()), (TIME, TIME()), (Time, Time()), (BOOLEAN, BOOLEAN()), (Boolean, Boolean()), ] columns = [Column('c%i' % (i + 1), t[0]) for (i, t) in enumerate(specs)] db = testing.db m = MetaData(db) t_table = Table('types', m, *columns) m.create_all() try: m2 = MetaData(db) rt = Table('types', m2, autoload=True) try: db.execute('CREATE VIEW types_v AS SELECT * from types') rv = Table('types_v', m2, autoload=True) expected = [len(c) > 1 and c[1] or c[0] for c in specs] for table in rt, rv: for i, reflected in enumerate(table.c): assert isinstance(reflected.type, 
type(expected[i])), '%d: %r' % (i, type(expected[i])) finally: db.execute('DROP VIEW types_v') finally: m.drop_all() class TestDefaults(TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' @testing.exclude('sqlite', '<', (3, 3, 8), 'sqlite3 changesets 3353 and 3440 modified ' 'behavior of default displayed in pragma ' 'table_info()') def test_default_reflection(self): # (ask_for, roundtripped_as_if_different) specs = [(String(3), '"foo"'), (NUMERIC(10, 2), '100.50'), (Integer, '5'), (Boolean, 'False')] columns = [Column('c%i' % (i + 1), t[0], server_default=text(t[1])) for (i, t) in enumerate(specs)] db = testing.db m = MetaData(db) t_table = Table('t_defaults', m, *columns) try: m.create_all() m2 = MetaData(db) rt = Table('t_defaults', m2, autoload=True) expected = [c[1] for c in specs] for i, reflected in enumerate(rt.c): eq_(str(reflected.server_default.arg), expected[i]) finally: m.drop_all() @testing.exclude('sqlite', '<', (3, 3, 8), 'sqlite3 changesets 3353 and 3440 modified ' 'behavior of default displayed in pragma ' 'table_info()') def test_default_reflection_2(self): db = testing.db m = MetaData(db) expected = ['my_default', '0'] table = \ """CREATE TABLE r_defaults ( data VARCHAR(40) DEFAULT 'my_default', val INTEGER NOT NULL DEFAULT 0 )""" try: db.execute(table) rt = Table('r_defaults', m, autoload=True) for i, reflected in enumerate(rt.c): eq_(str(reflected.server_default.arg), expected[i]) finally: db.execute('DROP TABLE r_defaults') class DialectTest(TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' def test_extra_reserved_words(self): """Tests reserved words in identifiers. 'true', 'false', and 'column' are undocumented reserved words when used as column identifiers (as of 3.5.1). Covering them here to ensure they remain in place if the dialect's reserved_words set is updated in the future. 
""" meta = MetaData(testing.db) t = Table( 'reserved', meta, Column('safe', Integer), Column('true', Integer), Column('false', Integer), Column('column', Integer), ) try: meta.create_all() t.insert().execute(safe=1) list(t.select().execute()) finally: meta.drop_all() def test_quoted_identifiers(self): """Tests autoload of tables created with quoted column names.""" # This is quirky in sqlite. testing.db.execute("""CREATE TABLE "django_content_type" ( "id" integer NOT NULL PRIMARY KEY, "django_stuff" text NULL ) """) testing.db.execute(""" CREATE TABLE "django_admin_log" ( "id" integer NOT NULL PRIMARY KEY, "action_time" datetime NOT NULL, "content_type_id" integer NULL REFERENCES "django_content_type" ("id"), "object_id" text NULL, "change_message" text NOT NULL ) """) try: meta = MetaData(testing.db) table1 = Table('django_admin_log', meta, autoload=True) table2 = Table('django_content_type', meta, autoload=True) j = table1.join(table2) assert j.onclause.compare(table1.c.content_type_id == table2.c.id) finally: testing.db.execute('drop table django_admin_log') testing.db.execute('drop table django_content_type') def test_attached_as_schema(self): cx = testing.db.connect() try: cx.execute('ATTACH DATABASE ":memory:" AS test_schema') dialect = cx.dialect assert dialect.get_table_names(cx, 'test_schema') == [] meta = MetaData(cx) Table('created', meta, Column('id', Integer), schema='test_schema') alt_master = Table('sqlite_master', meta, autoload=True, schema='test_schema') meta.create_all(cx) eq_(dialect.get_table_names(cx, 'test_schema'), ['created']) assert len(alt_master.c) > 0 meta.clear() reflected = Table('created', meta, autoload=True, schema='test_schema') assert len(reflected.c) == 1 cx.execute(reflected.insert(), dict(id=1)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [(1, )] cx.execute(reflected.update(), dict(id=2)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [(2, )] cx.execute(reflected.delete(reflected.c.id == 
2)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [] # note that sqlite_master is cleared, above meta.drop_all() assert dialect.get_table_names(cx, 'test_schema') == [] finally: cx.execute('DETACH DATABASE test_schema') @testing.exclude('sqlite', '<', (2, 6), 'no database support') def test_temp_table_reflection(self): cx = testing.db.connect() try: cx.execute('CREATE TEMPORARY TABLE tempy (id INT)') assert 'tempy' in cx.dialect.get_table_names(cx, None) meta = MetaData(cx) tempy = Table('tempy', meta, autoload=True) assert len(tempy.c) == 1 meta.drop_all() except: try: cx.execute('DROP TABLE tempy') except exc.DBAPIError: pass raise def test_dont_reflect_autoindex(self): meta = MetaData(testing.db) t = Table('foo', meta, Column('bar', String, primary_key=True)) meta.create_all() from sqlalchemy.engine.reflection import Inspector try: inspector = Inspector(testing.db) eq_(inspector.get_indexes('foo'), []) eq_(inspector.get_indexes('foo', include_auto_indexes=True), [{'unique': 1, 'name' : u'sqlite_autoindex_foo_1', 'column_names': [u'bar']}]) finally: meta.drop_all() def test_set_isolation_level(self): """Test setting the read uncommitted/serializable levels""" eng = create_engine(testing.db.url) eq_(eng.execute('PRAGMA read_uncommitted').scalar(), 0) eng = create_engine(testing.db.url, isolation_level='READ UNCOMMITTED') eq_(eng.execute('PRAGMA read_uncommitted').scalar(), 1) eng = create_engine(testing.db.url, isolation_level='SERIALIZABLE') eq_(eng.execute('PRAGMA read_uncommitted').scalar(), 0) assert_raises(exc.ArgumentError, create_engine, testing.db.url, isolation_level='FOO') def test_create_index_with_schema(self): """Test creation of index with explicit schema""" meta = MetaData(testing.db) t = Table('foo', meta, Column('bar', String, index=True), schema='main') try: meta.create_all() finally: meta.drop_all() class SQLTest(TestBase, AssertsCompiledSQL): """Tests SQLite-dialect specific compilation.""" __dialect__ = sqlite.dialect() def 
test_extract(self): t = sql.table('t', sql.column('col1')) mapping = { 'month': '%m', 'day': '%d', 'year': '%Y', 'second': '%S', 'hour': '%H', 'doy': '%j', 'minute': '%M', 'epoch': '%s', 'dow': '%w', 'week': '%W', } for field, subst in mapping.items(): self.assert_compile(select([extract(field, t.c.col1)]), "SELECT CAST(STRFTIME('%s', t.col1) AS " "INTEGER) AS anon_1 FROM t" % subst) class InsertTest(TestBase, AssertsExecutionResults): """Tests inserts and autoincrement.""" __only_on__ = 'sqlite' # empty insert (i.e. INSERT INTO table DEFAULT VALUES) fails on # 3.3.7 and before def _test_empty_insert(self, table, expect=1): try: table.create() for wanted in expect, expect * 2: table.insert().execute() rows = table.select().execute().fetchall() eq_(len(rows), wanted) finally: table.drop() @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk1(self): self._test_empty_insert(Table('a', MetaData(testing.db), Column('id', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk2(self): assert_raises(exc.DBAPIError, self._test_empty_insert, Table('b' , MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk3(self): assert_raises(exc.DBAPIError, self._test_empty_insert, Table('c' , MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, DefaultClause('123'), primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk4(self): self._test_empty_insert(Table('d', MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, DefaultClause('123' )))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk1(self): self._test_empty_insert(Table('e', MetaData(testing.db), Column('id', Integer))) 
@testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk2(self): self._test_empty_insert(Table('f', MetaData(testing.db), Column('x', Integer), Column('y', Integer))) def test_inserts_with_spaces(self): tbl = Table('tbl', MetaData('sqlite:///'), Column('with space', Integer), Column('without', Integer)) tbl.create() try: tbl.insert().execute({'without': 123}) assert list(tbl.select().execute()) == [(None, 123)] tbl.insert().execute({'with space': 456}) assert list(tbl.select().execute()) == [(None, 123), (456, None)] finally: tbl.drop() def full_text_search_missing(): """Test if full text search is not implemented and return False if it is and True otherwise.""" try: testing.db.execute('CREATE VIRTUAL TABLE t using FTS3;') testing.db.execute('DROP TABLE t;') return False except: return True class MatchTest(TestBase, AssertsCompiledSQL): __only_on__ = 'sqlite' __skip_if__ = full_text_search_missing, @classmethod def setup_class(cls): global metadata, cattable, matchtable metadata = MetaData(testing.db) testing.db.execute(""" CREATE VIRTUAL TABLE cattable using FTS3 ( id INTEGER NOT NULL, description VARCHAR(50), PRIMARY KEY (id) ) """) cattable = Table('cattable', metadata, autoload=True) testing.db.execute(""" CREATE VIRTUAL TABLE matchtable using FTS3 ( id INTEGER NOT NULL, title VARCHAR(200), category_id INTEGER NOT NULL, PRIMARY KEY (id) ) """) matchtable = Table('matchtable', metadata, autoload=True) metadata.create_all() cattable.insert().execute([{'id': 1, 'description': 'Python'}, {'id': 2, 'description': 'Ruby'}]) matchtable.insert().execute([{'id': 1, 'title' : 'Agile Web Development with Rails' , 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', 'category_id': 1}, {'id': 3, 'title' : "Programming Matz's Ruby", 'category_id': 2}, {'id': 4, 'title' : 'The Definitive Guide to Django', 'category_id': 1}, {'id': 5, 'title' : 'Python in a Nutshell', 'category_id': 1}]) @classmethod def teardown_class(cls): 
metadata.drop_all() def test_expression(self): self.assert_compile(matchtable.c.title.match('somstr'), 'matchtable.title MATCH ?') def test_simple_match(self): results = \ matchtable.select().where(matchtable.c.title.match('python' )).order_by(matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_prefix_match(self): results = \ matchtable.select().where(matchtable.c.title.match('nut*' )).execute().fetchall() eq_([5], [r.id for r in results]) def test_or_match(self): results2 = \ matchtable.select().where( matchtable.c.title.match('nutshell OR ruby' )).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results2 = \ matchtable.select().where( matchtable.c.title.match('python nutshell' )).execute().fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self): results = matchtable.select().where(and_(cattable.c.id == matchtable.c.category_id, cattable.c.description.match('Ruby' ))).order_by(matchtable.c.id).execute().fetchall() eq_([1, 3], [r.id for r in results]) class TestAutoIncrement(TestBase, AssertsCompiledSQL): def test_sqlite_autoincrement(self): table = Table('autoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None), sqlite_autoincrement=True) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)' , dialect=sqlite.dialect()) def test_sqlite_autoincrement_constraint(self): table = Table( 'autoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None), UniqueConstraint('x'), sqlite_autoincrement=True, ) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT, x ' 'INTEGER, UNIQUE (x))', dialect=sqlite.dialect()) def test_sqlite_no_autoincrement(self): table = Table('noautoinctable', MetaData(), Column('id', Integer, 
primary_key=True), Column('x', Integer, default=None)) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE noautoinctable (id INTEGER ' 'NOT NULL, x INTEGER, PRIMARY KEY (id))', dialect=sqlite.dialect())
mpl-2.0
Jgarcia-IAS/localizacion
openerp/addons-extra/odoo-pruebas/odoo-server/addons/account/__openerp__.py
41
7694
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'eInvoicing', 'version' : '1.1', 'author' : 'OpenERP SA', 'category' : 'Accounting & Finance', 'description' : """ Accounting and Financial Management. ==================================== Financial and accounting module that covers: -------------------------------------------- * General Accounting * Cost/Analytic accounting * Third party accounting * Taxes management * Budgets * Customer and Supplier Invoices * Bank statements * Reconciliation process by partner Creates a dashboard for accountants that includes: -------------------------------------------------- * List of Customer Invoices to Approve * Company Analysis * Graph of Treasury Processes like maintaining general ledgers are done through the defined Financial Journals (entry move line or grouping is maintained through a journal) for a particular financial year and for preparation of vouchers there is a module named account_voucher. 
""", 'website': 'https://www.odoo.com/page/billing', 'images' : ['images/accounts.jpeg','images/bank_statement.jpeg','images/cash_register.jpeg','images/chart_of_accounts.jpeg','images/customer_invoice.jpeg','images/journal_entries.jpeg'], 'depends' : ['base_setup', 'product', 'analytic', 'board', 'edi', 'report'], 'data': [ 'security/account_security.xml', 'security/ir.model.access.csv', 'account_menuitem.xml', 'report/account_invoice_report_view.xml', 'report/account_entries_report_view.xml', 'report/account_treasury_report_view.xml', 'report/account_report_view.xml', 'report/account_analytic_entries_report_view.xml', 'wizard/account_move_bank_reconcile_view.xml', 'wizard/account_use_model_view.xml', 'account_installer.xml', 'wizard/account_period_close_view.xml', 'wizard/account_reconcile_view.xml', 'wizard/account_unreconcile_view.xml', 'wizard/account_statement_from_invoice_view.xml', 'account_view.xml', 'account_report.xml', 'account_financial_report_data.xml', 'wizard/account_report_common_view.xml', 'wizard/account_invoice_refund_view.xml', 'wizard/account_fiscalyear_close_state.xml', 'wizard/account_chart_view.xml', 'wizard/account_tax_chart_view.xml', 'wizard/account_move_line_reconcile_select_view.xml', 'wizard/account_open_closed_fiscalyear_view.xml', 'wizard/account_move_line_unreconcile_select_view.xml', 'wizard/account_vat_view.xml', 'wizard/account_report_print_journal_view.xml', 'wizard/account_report_general_journal_view.xml', 'wizard/account_report_central_journal_view.xml', 'wizard/account_subscription_generate_view.xml', 'wizard/account_fiscalyear_close_view.xml', 'wizard/account_state_open_view.xml', 'wizard/account_journal_select_view.xml', 'wizard/account_change_currency_view.xml', 'wizard/account_validate_move_view.xml', 'wizard/account_report_general_ledger_view.xml', 'wizard/account_invoice_state_view.xml', 'wizard/account_report_partner_balance_view.xml', 'wizard/account_report_account_balance_view.xml', 
'wizard/account_report_aged_partner_balance_view.xml', 'wizard/account_report_partner_ledger_view.xml', 'wizard/account_reconcile_partner_process_view.xml', 'wizard/account_automatic_reconcile_view.xml', 'wizard/account_financial_report_view.xml', 'wizard/pos_box.xml', 'project/wizard/project_account_analytic_line_view.xml', 'account_end_fy.xml', 'account_invoice_view.xml', 'data/account_data.xml', 'data/data_account_type.xml', 'data/configurable_account_chart.xml', 'account_invoice_workflow.xml', 'project/project_view.xml', 'project/project_report.xml', 'project/wizard/account_analytic_balance_report_view.xml', 'project/wizard/account_analytic_cost_ledger_view.xml', 'project/wizard/account_analytic_inverted_balance_report.xml', 'project/wizard/account_analytic_journal_report_view.xml', 'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml', 'project/wizard/account_analytic_chart_view.xml', 'partner_view.xml', 'product_view.xml', 'account_assert_test.xml', 'ir_sequence_view.xml', 'company_view.xml', 'edi/invoice_action_data.xml', 'account_bank_view.xml', 'res_config_view.xml', 'account_pre_install.yml', 'views/report_vat.xml', 'views/report_invoice.xml', 'views/report_trialbalance.xml', 'views/report_centraljournal.xml', 'views/report_overdue.xml', 'views/report_generaljournal.xml', 'views/report_journal.xml', 'views/report_salepurchasejournal.xml', 'views/report_partnerbalance.xml', 'views/report_agedpartnerbalance.xml', 'views/report_partnerledger.xml', 'views/report_partnerledgerother.xml', 'views/report_financial.xml', 'views/report_generalledger.xml', 'project/views/report_analyticbalance.xml', 'project/views/report_analyticjournal.xml', 'project/views/report_analyticcostledgerquantity.xml', 'project/views/report_analyticcostledger.xml', 'project/views/report_invertedanalyticbalance.xml', 'views/account.xml', ], 'qweb' : [ "static/src/xml/account_move_reconciliation.xml", "static/src/xml/account_move_line_quickadd.xml", 
"static/src/xml/account_bank_statement_reconciliation.xml", ], 'demo': [ 'demo/account_demo.xml', 'project/project_demo.xml', 'project/analytic_account_demo.xml', 'demo/account_minimal.xml', 'demo/account_invoice_demo.xml', 'demo/account_bank_statement.xml', 'account_unit_test.xml', ], 'test': [ 'test/account_test_users.yml', 'test/account_customer_invoice.yml', 'test/account_supplier_invoice.yml', 'test/account_change_currency.yml', 'test/chart_of_account.yml', 'test/account_period_close.yml', 'test/account_use_model.yml', 'test/account_validate_account_move.yml', 'test/test_edi_invoice.yml', 'test/account_report.yml', 'test/account_fiscalyear_close.yml', #last test, as it will definitively close the demo fiscalyear ], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
egabancho/invenio
invenio/modules/search/testsuite/test_search_engine_summarizer.py
4
1335
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2011, 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Unit tests for the search engine summarizer.""" # Note: citation summary tests were moved to BibRank as part of the # self-cite commit 1fcbed0ec34a9c31f8a727e21890c529d8222256. Keeping # this file here with empty test case set in order to overwrite any # previously installed file. Also, keeping TEST_SUITE empty so that # `inveniocfg --run-unit-tests' would not complain. from invenio.testsuite import make_test_suite, run_test_suite TEST_SUITE = make_test_suite() if __name__ == "__main__": run_test_suite(TEST_SUITE)
gpl-2.0
jayme-github/headphones
lib/cherrypy/_cprequest.py
58
37180
import os
import sys
import time
import warnings

import cherrypy
from cherrypy._cpcompat import basestring, copykeys, ntob, unicodestr
from cherrypy._cpcompat import SimpleCookie, CookieError, py3k
from cherrypy import _cpreqbody, _cpconfig
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil, file_generator


class Hook(object):
    """A callback and its metadata: failsafe, priority, and kwargs."""

    callback = None
    """
    The bare callable that this Hook object is wrapping, which will
    be called when the Hook is called."""

    failsafe = False
    """
    If True, the callback is guaranteed to run even if other callbacks
    from the same call point raise exceptions."""

    priority = 50
    """
    Defines the order of execution for a list of Hooks. Priority numbers
    should be limited to the closed interval [0, 100], but values outside
    this range are acceptable, as are fractional values."""

    kwargs = {}
    """
    A set of keyword arguments that will be passed to the callable
    on each call."""

    def __init__(self, callback, failsafe=None, priority=None, **kwargs):
        self.callback = callback

        # If not given explicitly, failsafe/priority fall back to
        # attributes set on the callback itself (e.g. by tool authors).
        if failsafe is None:
            failsafe = getattr(callback, "failsafe", False)
        self.failsafe = failsafe

        if priority is None:
            priority = getattr(callback, "priority", 50)
        self.priority = priority

        self.kwargs = kwargs

    # Hooks are sorted by priority before being run (see HookMap.run);
    # __lt__ serves Python 3 sorting, __cmp__ serves Python 2.
    def __lt__(self, other):
        # Python 3
        return self.priority < other.priority

    def __cmp__(self, other):
        # Python 2
        return cmp(self.priority, other.priority)

    def __call__(self):
        """Run self.callback(**self.kwargs)."""
        return self.callback(**self.kwargs)

    def __repr__(self):
        cls = self.__class__
        return ("%s.%s(callback=%r, failsafe=%r, priority=%r, %s)"
                % (cls.__module__, cls.__name__, self.callback,
                   self.failsafe, self.priority,
                   ", ".join(['%s=%r' % (k, v)
                              for k, v in self.kwargs.items()])))


class HookMap(dict):
    """A map of call points to lists of callbacks (Hook objects)."""

    # __new__ (not __init__) pre-populates one empty list per hookpoint,
    # so copies made via __copy__ start from a clean dict.
    def __new__(cls, points=None):
        d = dict.__new__(cls)
        for p in points or []:
            d[p] = []
        return d

    def __init__(self, *a, **kw):
        # All initialization happens in __new__; accept and ignore any
        # args so dict's signature does not interfere.
        pass

    def attach(self, point, callback, failsafe=None, priority=None, **kwargs):
        """Append a new Hook made from the supplied arguments."""
        self[point].append(Hook(callback, failsafe, priority, **kwargs))

    def run(self, point):
        """Execute all registered Hooks (callbacks) for the given point."""
        exc = None
        hooks = self[point]
        # Hook defines rich/`cmp` comparison on priority, so a plain
        # sort orders the callbacks for execution.
        hooks.sort()
        for hook in hooks:
            # Some hooks are guaranteed to run even if others at
            # the same hookpoint fail. We will still log the failure,
            # but proceed on to the next hook. The only way
            # to stop all processing from one of these hooks is
            # to raise SystemExit and stop the whole server.
            if exc is None or hook.failsafe:
                try:
                    hook()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except (cherrypy.HTTPError, cherrypy.HTTPRedirect,
                        cherrypy.InternalRedirect):
                    # Control-flow exceptions: remember and re-raise after
                    # the remaining failsafe hooks have run (not logged).
                    exc = sys.exc_info()[1]
                except:
                    # Bare except is deliberate here: any other failure is
                    # logged and deferred so failsafe hooks still run.
                    exc = sys.exc_info()[1]
                    cherrypy.log(traceback=True, severity=40)
        if exc:
            raise exc

    def __copy__(self):
        newmap = self.__class__()
        # We can't just use 'update' because we want copies of the
        # mutable values (each is a list) as well.
        for k, v in self.items():
            newmap[k] = v[:]
        return newmap
    # Public alias so callers can write hooks.copy().
    copy = __copy__

    def __repr__(self):
        cls = self.__class__
        return "%s.%s(points=%r)" % (
            cls.__module__,
            cls.__name__,
            copykeys(self)
        )


# Config namespace handlers

def hooks_namespace(k, v):
    """Attach bare hooks declared in config."""
    # Use split again to allow multiple hooks for a single
    # hookpoint per path (e.g. "hooks.before_handler.1").
    # Little-known fact you only get from reading source ;)
    hookpoint = k.split(".", 1)[0]
    if isinstance(v, basestring):
        # Dotted-path string in config -> resolve to the callable.
        v = cherrypy.lib.attributes(v)
    if not isinstance(v, Hook):
        v = Hook(v)
    cherrypy.serving.request.hooks[hookpoint].append(v)


def request_namespace(k, v):
    """Attach request attributes declared in config."""
    # Provides config entries to set request.body attrs (like
    # attempt_charsets).
if k[:5] == 'body.': setattr(cherrypy.serving.request.body, k[5:], v) else: setattr(cherrypy.serving.request, k, v) def response_namespace(k, v): """Attach response attributes declared in config.""" # Provides config entries to set default response headers # http://cherrypy.org/ticket/889 if k[:8] == 'headers.': cherrypy.serving.response.headers[k.split('.', 1)[1]] = v else: setattr(cherrypy.serving.response, k, v) def error_page_namespace(k, v): """Attach error pages declared in config.""" if k != 'default': k = int(k) cherrypy.serving.request.error_page[k] = v hookpoints = ['on_start_resource', 'before_request_body', 'before_handler', 'before_finalize', 'on_end_resource', 'on_end_request', 'before_error_response', 'after_error_response'] class Request(object): """An HTTP request. This object represents the metadata of an HTTP request message; that is, it contains attributes which describe the environment in which the request URL, headers, and body were sent (if you want tools to interpret the headers and body, those are elsewhere, mostly in Tools). This 'metadata' consists of socket data, transport characteristics, and the Request-Line. This object also contains data regarding the configuration in effect for the given URL, and the execution plan for generating a response. """ prev = None """ The previous Request object (if any). This should be None unless we are processing an InternalRedirect.""" # Conversation/connection attributes local = httputil.Host("127.0.0.1", 80) "An httputil.Host(ip, port, hostname) object for the server socket." remote = httputil.Host("127.0.0.1", 1111) "An httputil.Host(ip, port, hostname) object for the client socket." scheme = "http" """ The protocol used between client and server. In most cases, this will be either 'http' or 'https'.""" server_protocol = "HTTP/1.1" """ The HTTP version for which the HTTP server is at least conditionally compliant.""" base = "" """The (scheme://host) portion of the requested URL. In some cases (e.g. 
when proxying via mod_rewrite), this may contain path segments which cherrypy.url uses when constructing url's, but which otherwise are ignored by CherryPy. Regardless, this value MUST NOT end in a slash.""" # Request-Line attributes request_line = "" """ The complete Request-Line received from the client. This is a single string consisting of the request method, URI, and protocol version (joined by spaces). Any final CRLF is removed.""" method = "GET" """ Indicates the HTTP method to be performed on the resource identified by the Request-URI. Common methods include GET, HEAD, POST, PUT, and DELETE. CherryPy allows any extension method; however, various HTTP servers and gateways may restrict the set of allowable methods. CherryPy applications SHOULD restrict the set (on a per-URI basis).""" query_string = "" """ The query component of the Request-URI, a string of information to be interpreted by the resource. The query portion of a URI follows the path component, and is separated by a '?'. For example, the URI 'http://www.cherrypy.org/wiki?a=3&b=4' has the query component, 'a=3&b=4'.""" query_string_encoding = 'utf8' """ The encoding expected for query string arguments after % HEX HEX decoding). If a query string is provided that cannot be decoded with this encoding, 404 is raised (since technically it's a different URI). If you want arbitrary encodings to not error, set this to 'Latin-1'; you can then encode back to bytes and re-decode to whatever encoding you like later. """ protocol = (1, 1) """The HTTP protocol version corresponding to the set of features which should be allowed in the response. If BOTH the client's request message AND the server's level of HTTP compliance is HTTP/1.1, this attribute will be the tuple (1, 1). If either is 1.0, this attribute will be the tuple (1, 0). Lower HTTP protocol versions are not explicitly supported.""" params = {} """ A dict which combines query string (GET) and request entity (POST) variables. 
This is populated in two stages: GET params are added before the 'on_start_resource' hook, and POST params are added between the 'before_request_body' and 'before_handler' hooks.""" # Message attributes header_list = [] """ A list of the HTTP request headers as (name, value) tuples. In general, you should use request.headers (a dict) instead.""" headers = httputil.HeaderMap() """ A dict-like object containing the request headers. Keys are header names (in Title-Case format); however, you may get and set them in a case-insensitive manner. That is, headers['Content-Type'] and headers['content-type'] refer to the same value. Values are header values (decoded according to :rfc:`2047` if necessary). See also: httputil.HeaderMap, httputil.HeaderElement.""" cookie = SimpleCookie() """See help(Cookie).""" rfile = None """ If the request included an entity (body), it will be available as a stream in this attribute. However, the rfile will normally be read for you between the 'before_request_body' hook and the 'before_handler' hook, and the resulting string is placed into either request.params or the request.body attribute. You may disable the automatic consumption of the rfile by setting request.process_request_body to False, either in config for the desired path, or in an 'on_start_resource' or 'before_request_body' hook. WARNING: In almost every case, you should not attempt to read from the rfile stream after CherryPy's automatic mechanism has read it. If you turn off the automatic parsing of rfile, you should read exactly the number of bytes specified in request.headers['Content-Length']. Ignoring either of these warnings may result in a hung request thread or in corruption of the next (pipelined) request. 
""" process_request_body = True """ If True, the rfile (if any) is automatically read and parsed, and the result placed into request.params or request.body.""" methods_with_bodies = ("POST", "PUT") """ A sequence of HTTP methods for which CherryPy will automatically attempt to read a body from the rfile. If you are going to change this property, modify it on the configuration (recommended) or on the "hook point" `on_start_resource`. """ body = None """ If the request Content-Type is 'application/x-www-form-urlencoded' or multipart, this will be None. Otherwise, this will be an instance of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>` (which you can .read()); this value is set between the 'before_request_body' and 'before_handler' hooks (assuming that process_request_body is True).""" # Dispatch attributes dispatch = cherrypy.dispatch.Dispatcher() """ The object which looks up the 'page handler' callable and collects config for the current request based on the path_info, other request attributes, and the application architecture. The core calls the dispatcher as early as possible, passing it a 'path_info' argument. The default dispatcher discovers the page handler by matching path_info to a hierarchical arrangement of objects, starting at request.app.root. See help(cherrypy.dispatch) for more information.""" script_name = "" """ The 'mount point' of the application which is handling this request. This attribute MUST NOT end in a slash. If the script_name refers to the root of the URI, it MUST be an empty string (not "/"). """ path_info = "/" """ The 'relative path' portion of the Request-URI. This is relative to the script_name ('mount point') of the application which is handling this request.""" login = None """ When authentication is used during the request processing this is set to 'False' if it failed and to the 'username' value if it succeeded. 
The default 'None' implies that no authentication happened.""" # Note that cherrypy.url uses "if request.app:" to determine whether # the call is during a real HTTP request or not. So leave this None. app = None """The cherrypy.Application object which is handling this request.""" handler = None """ The function, method, or other callable which CherryPy will call to produce the response. The discovery of the handler and the arguments it will receive are determined by the request.dispatch object. By default, the handler is discovered by walking a tree of objects starting at request.app.root, and is then passed all HTTP params (from the query string and POST body) as keyword arguments.""" toolmaps = {} """ A nested dict of all Toolboxes and Tools in effect for this request, of the form: {Toolbox.namespace: {Tool.name: config dict}}.""" config = None """ A flat dict of all configuration entries which apply to the current request. These entries are collected from global config, application config (based on request.path_info), and from handler config (exactly how is governed by the request.dispatch object in effect for this request; by default, handler config can be attached anywhere in the tree between request.app.root and the final handler, and inherits downward).""" is_index = None """ This will be True if the current request is mapped to an 'index' resource handler (also, a 'default' handler if path_info ends with a slash). The value may be used to automatically redirect the user-agent to a 'more canonical' URL which either adds or removes the trailing slash. See cherrypy.tools.trailing_slash.""" hooks = HookMap(hookpoints) """ A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}. Each key is a str naming the hook point, and each value is a list of hooks which will be called at that hook point during this request. The list of hooks is generally populated as early as possible (mostly from Tools specified in config), but may be extended at any time. 
See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools.""" error_response = cherrypy.HTTPError(500).set_response """ The no-arg callable which will handle unexpected, untrapped errors during request processing. This is not used for expected exceptions (like NotFound, HTTPError, or HTTPRedirect) which are raised in response to expected conditions (those should be customized either via request.error_page or by overriding HTTPError.set_response). By default, error_response uses HTTPError(500) to return a generic error response to the user-agent.""" error_page = {} """ A dict of {error code: response filename or callable} pairs. The error code must be an int representing a given HTTP error code, or the string 'default', which will be used if no matching entry is found for a given numeric code. If a filename is provided, the file should contain a Python string- formatting template, and can expect by default to receive format values with the mapping keys %(status)s, %(message)s, %(traceback)s, and %(version)s. The set of format mappings can be extended by overriding HTTPError.set_response. If a callable is provided, it will be called by default with keyword arguments 'status', 'message', 'traceback', and 'version', as for a string-formatting template. The callable must return a string or iterable of strings which will be set to response.body. It may also override headers or perform any other processing. If no entry is given for an error code, and no 'default' entry exists, a default template will be used. 
""" show_tracebacks = True """ If True, unexpected errors encountered during request processing will include a traceback in the response body.""" show_mismatched_params = True """ If True, mismatched parameters encountered during PageHandler invocation processing will be included in the response body.""" throws = (KeyboardInterrupt, SystemExit, cherrypy.InternalRedirect) """The sequence of exceptions which Request.run does not trap.""" throw_errors = False """ If True, Request.run will not trap any errors (except HTTPRedirect and HTTPError, which are more properly called 'exceptions', not errors).""" closed = False """True once the close method has been called, False otherwise.""" stage = None """ A string containing the stage reached in the request-handling process. This is useful when debugging a live server with hung requests.""" namespaces = _cpconfig.NamespaceSet( **{"hooks": hooks_namespace, "request": request_namespace, "response": response_namespace, "error_page": error_page_namespace, "tools": cherrypy.tools, }) def __init__(self, local_host, remote_host, scheme="http", server_protocol="HTTP/1.1"): """Populate a new Request object. local_host should be an httputil.Host object with the server info. remote_host should be an httputil.Host object with the client info. scheme should be a string, either "http" or "https". """ self.local = local_host self.remote = remote_host self.scheme = scheme self.server_protocol = server_protocol self.closed = False # Put a *copy* of the class error_page into self. self.error_page = self.error_page.copy() # Put a *copy* of the class namespaces into self. self.namespaces = self.namespaces.copy() self.stage = None def close(self): """Run cleanup code. (Core)""" if not self.closed: self.closed = True self.stage = 'on_end_request' self.hooks.run('on_end_request') self.stage = 'close' def run(self, method, path, query_string, req_protocol, headers, rfile): r"""Process the Request. 
(Core) method, path, query_string, and req_protocol should be pulled directly from the Request-Line (e.g. "GET /path?key=val HTTP/1.0"). path This should be %XX-unquoted, but query_string should not be. When using Python 2, they both MUST be byte strings, not unicode strings. When using Python 3, they both MUST be unicode strings, not byte strings, and preferably not bytes \x00-\xFF disguised as unicode. headers A list of (name, value) tuples. rfile A file-like object containing the HTTP request entity. When run() is done, the returned object should have 3 attributes: * status, e.g. "200 OK" * header_list, a list of (name, value) tuples * body, an iterable yielding strings Consumer code (HTTP servers) should then access these response attributes to build the outbound stream. """ response = cherrypy.serving.response self.stage = 'run' try: self.error_response = cherrypy.HTTPError(500).set_response self.method = method path = path or "/" self.query_string = query_string or '' self.params = {} # Compare request and server HTTP protocol versions, in case our # server does not support the requested protocol. Limit our output # to min(req, server). We want the following output: # request server actual written supported response # protocol protocol response protocol feature set # a 1.0 1.0 1.0 1.0 # b 1.0 1.1 1.1 1.0 # c 1.1 1.0 1.0 1.0 # d 1.1 1.1 1.1 1.1 # Notice that, in (b), the response will be "HTTP/1.1" even though # the client only understands 1.0. RFC 2616 10.5.6 says we should # only return 505 if the _major_ version is different. rp = int(req_protocol[5]), int(req_protocol[7]) sp = int(self.server_protocol[5]), int(self.server_protocol[7]) self.protocol = min(rp, sp) response.headers.protocol = self.protocol # Rebuild first line of the request (e.g. "GET /path HTTP/1.0"). url = path if query_string: url += '?' 
+ query_string self.request_line = '%s %s %s' % (method, url, req_protocol) self.header_list = list(headers) self.headers = httputil.HeaderMap() self.rfile = rfile self.body = None self.cookie = SimpleCookie() self.handler = None # path_info should be the path from the # app root (script_name) to the handler. self.script_name = self.app.script_name self.path_info = pi = path[len(self.script_name):] self.stage = 'respond' self.respond(pi) except self.throws: raise except: if self.throw_errors: raise else: # Failure in setup, error handler or finalize. Bypass them. # Can't use handle_error because we may not have hooks yet. cherrypy.log(traceback=True, severity=40) if self.show_tracebacks: body = format_exc() else: body = "" r = bare_error(body) response.output_status, response.header_list, response.body = r if self.method == "HEAD": # HEAD requests MUST NOT return a message-body in the response. response.body = [] try: cherrypy.log.access() except: cherrypy.log.error(traceback=True) if response.timed_out: raise cherrypy.TimeoutError() return response # Uncomment for stage debugging # stage = property(lambda self: self._stage, lambda self, v: print(v)) def respond(self, path_info): """Generate a response for the resource at self.path_info. (Core)""" response = cherrypy.serving.response try: try: try: if self.app is None: raise cherrypy.NotFound() # Get the 'Host' header, so we can HTTPRedirect properly. 
self.stage = 'process_headers' self.process_headers() # Make a copy of the class hooks self.hooks = self.__class__.hooks.copy() self.toolmaps = {} self.stage = 'get_resource' self.get_resource(path_info) self.body = _cpreqbody.RequestBody( self.rfile, self.headers, request_params=self.params) self.namespaces(self.config) self.stage = 'on_start_resource' self.hooks.run('on_start_resource') # Parse the querystring self.stage = 'process_query_string' self.process_query_string() # Process the body if self.process_request_body: if self.method not in self.methods_with_bodies: self.process_request_body = False self.stage = 'before_request_body' self.hooks.run('before_request_body') if self.process_request_body: self.body.process() # Run the handler self.stage = 'before_handler' self.hooks.run('before_handler') if self.handler: self.stage = 'handler' response.body = self.handler() # Finalize self.stage = 'before_finalize' self.hooks.run('before_finalize') response.finalize() except (cherrypy.HTTPRedirect, cherrypy.HTTPError): inst = sys.exc_info()[1] inst.set_response() self.stage = 'before_finalize (HTTPError)' self.hooks.run('before_finalize') response.finalize() finally: self.stage = 'on_end_resource' self.hooks.run('on_end_resource') except self.throws: raise except: if self.throw_errors: raise self.handle_error() def process_query_string(self): """Parse the query string into Python structures. (Core)""" try: p = httputil.parse_query_string( self.query_string, encoding=self.query_string_encoding) except UnicodeDecodeError: raise cherrypy.HTTPError( 404, "The given query string could not be processed. Query " "strings for this resource must be encoded with %r." % self.query_string_encoding) # Python 2 only: keyword arguments must be byte strings (type 'str'). 
if not py3k: for key, value in p.items(): if isinstance(key, unicode): del p[key] p[key.encode(self.query_string_encoding)] = value self.params.update(p) def process_headers(self): """Parse HTTP header data into Python structures. (Core)""" # Process the headers into self.headers headers = self.headers for name, value in self.header_list: # Call title() now (and use dict.__method__(headers)) # so title doesn't have to be called twice. name = name.title() value = value.strip() # Warning: if there is more than one header entry for cookies # (AFAIK, only Konqueror does that), only the last one will # remain in headers (but they will be correctly stored in # request.cookie). if "=?" in value: dict.__setitem__(headers, name, httputil.decode_TEXT(value)) else: dict.__setitem__(headers, name, value) # Handle cookies differently because on Konqueror, multiple # cookies come on different lines with the same key if name == 'Cookie': try: self.cookie.load(value) except CookieError: msg = "Illegal cookie name %s" % value.split('=')[0] raise cherrypy.HTTPError(400, msg) if not dict.__contains__(headers, 'Host'): # All Internet-based HTTP/1.1 servers MUST respond with a 400 # (Bad Request) status code to any HTTP/1.1 request message # which lacks a Host header field. if self.protocol >= (1, 1): msg = "HTTP/1.1 requires a 'Host' request header." raise cherrypy.HTTPError(400, msg) host = dict.get(headers, 'Host') if not host: host = self.local.name or self.local.ip self.base = "%s://%s" % (self.scheme, host) def get_resource(self, path): """Call a dispatcher (which sets self.handler and .config). (Core)""" # First, see if there is a custom dispatch at this URI. Custom # dispatchers can only be specified in app.config, not in _cp_config # (since custom dispatchers may not even have an app.root). 
dispatch = self.app.find_config( path, "request.dispatch", self.dispatch) # dispatch() should set self.handler and self.config dispatch(path) def handle_error(self): """Handle the last unanticipated exception. (Core)""" try: self.hooks.run("before_error_response") if self.error_response: self.error_response() self.hooks.run("after_error_response") cherrypy.serving.response.finalize() except cherrypy.HTTPRedirect: inst = sys.exc_info()[1] inst.set_response() cherrypy.serving.response.finalize() # ------------------------- Properties ------------------------- # def _get_body_params(self): warnings.warn( "body_params is deprecated in CherryPy 3.2, will be removed in " "CherryPy 3.3.", DeprecationWarning ) return self.body.params body_params = property(_get_body_params, doc=""" If the request Content-Type is 'application/x-www-form-urlencoded' or multipart, this will be a dict of the params pulled from the entity body; that is, it will be the portion of request.params that come from the message body (sometimes called "POST params", although they can be sent with various HTTP method verbs). This value is set between the 'before_request_body' and 'before_handler' hooks (assuming that process_request_body is True). Deprecated in 3.2, will be removed for 3.3 in favor of :attr:`request.body.params<cherrypy._cprequest.RequestBody.params>`.""") class ResponseBody(object): """The body of the HTTP response (the response entity).""" if py3k: unicode_err = ("Page handlers MUST return bytes. Use tools.encode " "if you wish to return unicode.") def __get__(self, obj, objclass=None): if obj is None: # When calling on the class instead of an instance... return self else: return obj._body def __set__(self, obj, value): # Convert the given value to an iterable object. 
if py3k and isinstance(value, str): raise ValueError(self.unicode_err) if isinstance(value, basestring): # strings get wrapped in a list because iterating over a single # item list is much faster than iterating over every character # in a long string. if value: value = [value] else: # [''] doesn't evaluate to False, so replace it with []. value = [] elif py3k and isinstance(value, list): # every item in a list must be bytes... for i, item in enumerate(value): if isinstance(item, str): raise ValueError(self.unicode_err) # Don't use isinstance here; io.IOBase which has an ABC takes # 1000 times as long as, say, isinstance(value, str) elif hasattr(value, 'read'): value = file_generator(value) elif value is None: value = [] obj._body = value class Response(object): """An HTTP Response, including status, headers, and body.""" status = "" """The HTTP Status-Code and Reason-Phrase.""" header_list = [] """ A list of the HTTP response headers as (name, value) tuples. In general, you should use response.headers (a dict) instead. This attribute is generated from response.headers and is not valid until after the finalize phase.""" headers = httputil.HeaderMap() """ A dict-like object containing the response headers. Keys are header names (in Title-Case format); however, you may get and set them in a case-insensitive manner. That is, headers['Content-Type'] and headers['content-type'] refer to the same value. Values are header values (decoded according to :rfc:`2047` if necessary). .. seealso:: classes :class:`HeaderMap`, :class:`HeaderElement` """ cookie = SimpleCookie() """See help(Cookie).""" body = ResponseBody() """The body (entity) of the HTTP response.""" time = None """The value of time.time() when created. 
Use in HTTP dates.""" timeout = 300 """Seconds after which the response will be aborted.""" timed_out = False """ Flag to indicate the response should be aborted, because it has exceeded its timeout.""" stream = False """If False, buffer the response body.""" def __init__(self): self.status = None self.header_list = None self._body = [] self.time = time.time() self.headers = httputil.HeaderMap() # Since we know all our keys are titled strings, we can # bypass HeaderMap.update and get a big speed boost. dict.update(self.headers, { "Content-Type": 'text/html', "Server": "CherryPy/" + cherrypy.__version__, "Date": httputil.HTTPDate(self.time), }) self.cookie = SimpleCookie() def collapse_body(self): """Collapse self.body to a single string; replace it and return it.""" if isinstance(self.body, basestring): return self.body newbody = [] for chunk in self.body: if py3k and not isinstance(chunk, bytes): raise TypeError("Chunk %s is not of type 'bytes'." % repr(chunk)) newbody.append(chunk) newbody = ntob('').join(newbody) self.body = newbody return newbody def finalize(self): """Transform headers (and cookies) into self.header_list. (Core)""" try: code, reason, _ = httputil.valid_status(self.status) except ValueError: raise cherrypy.HTTPError(500, sys.exc_info()[1].args[0]) headers = self.headers self.status = "%s %s" % (code, reason) self.output_status = ntob(str(code), 'ascii') + \ ntob(" ") + headers.encode(reason) if self.stream: # The upshot: wsgiserver will chunk the response if # you pop Content-Length (or set it explicitly to None). # Note that lib.static sets C-L to the file's st_size. if dict.get(headers, 'Content-Length') is None: dict.pop(headers, 'Content-Length', None) elif code < 200 or code in (204, 205, 304): # "All 1xx (informational), 204 (no content), # and 304 (not modified) responses MUST NOT # include a message-body." 
dict.pop(headers, 'Content-Length', None) self.body = ntob("") else: # Responses which are not streamed should have a Content-Length, # but allow user code to set Content-Length if desired. if dict.get(headers, 'Content-Length') is None: content = self.collapse_body() dict.__setitem__(headers, 'Content-Length', len(content)) # Transform our header dict into a list of tuples. self.header_list = h = headers.output() cookie = self.cookie.output() if cookie: for line in cookie.split("\n"): if line.endswith("\r"): # Python 2.4 emits cookies joined by LF but 2.5+ by CRLF. line = line[:-1] name, value = line.split(": ", 1) if isinstance(name, unicodestr): name = name.encode("ISO-8859-1") if isinstance(value, unicodestr): value = headers.encode(value) h.append((name, value)) def check_timeout(self): """If now > self.time + self.timeout, set self.timed_out. This purposefully sets a flag, rather than raising an error, so that a monitor thread can interrupt the Response thread. """ if time.time() > self.time + self.timeout: self.timed_out = True
gpl-3.0
cjvogl/seismic
tsunami/1d/pwlinear1/pwlin_topo.py
2
1984
from pylab import * plot_profile = False x0 = -150e3 # left boundary (meters) x0_slope = -65e3 # start of slope x0_shelf = -45e3 # start of shelf x0_beach = -5e3 # start of beach x0_shore = 0. # initial shoreline x1 = x0_shore + 2e3 # right boundary z0_ocean = -3000. # depth of ocean z0_shelf = -100. # depth at x0_shelf z0_beach = -100. # depth at x0_beach z0_shore = 0. # depth at x0_shore ## Used by sloping_fault code to define seafloor so topo matches def get_seafloor_parameters(): return x0, x0_slope, x0_shelf, x0_beach, x0_shore, x1 if x0_beach != x0_shelf: slope_of_shelf = (z0_beach - z0_shelf) / (x0_beach - x0_shelf) else: slope_of_shelf = 1e9 if x0_slope != x0_shelf: slope_of_slope = (z0_ocean - z0_shelf) / (x0_slope - x0_shelf) else: slope_of_slope = 1e9 slope_of_beach = (z0_beach - z0_shore) / (x0_beach - x0_shore) print "Slope of shelf = ",slope_of_shelf print "Slope of beach = ",slope_of_beach def shelf_pwlin(r): """ Ocean followed by continental slope, continental shelf, and beach. The ocean is flat, the slope, shelf, and beach are linear. """ z = z0_shore + slope_of_beach * r # beach z = where(r<x0_beach, z0_shelf + slope_of_shelf*(r-x0_shelf), z) z = where(r<x0_shelf, z0_ocean + slope_of_slope*(r-x0_slope), z) z = where(r<x0_slope, z0_ocean, z) return z if __name__=="__main__": from make_nonuniform_grid import make_grid dx_min = 3. h_min = 5. x,B = make_grid(x0,x1,dx_min,h_min,shelf_pwlin,'grid.data') r = linspace(x0,x1,1000) s = shelf_pwlin(r) eta = where(s<0,0,s) figure(13,figsize=(12,5)) clf() fill_between(r/1e3,s,eta,color='b') plot(r/1e3,s,'g') xlim(x0/1e3, x1/1e3) ylim(z0_ocean*1.1, 200) xlabel('kilometers') ylabel('meters') title('shelf and beach profile') fname = 'profile.png' savefig(fname) print "Created ",fname
bsd-2-clause
MarkusAlexander/makken
node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
1789
10585
# Copyright (c) 2014 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Xcode-ninja wrapper project file generator. This updates the data structures passed to the Xcode gyp generator to build with ninja instead. The Xcode project itself is transformed into a list of executable targets, each with a build step to build with ninja, and a target with every source and resource file. This appears to sidestep some of the major performance headaches experienced using complex projects and large number of targets within Xcode. """ import errno import gyp.generator.ninja import os import re import xml.sax.saxutils def _WriteWorkspace(main_gyp, sources_gyp, params): """ Create a workspace to wrap main and sources gyp paths. """ (build_file_root, build_file_ext) = os.path.splitext(main_gyp) workspace_path = build_file_root + '.xcworkspace' options = params['options'] if options.generator_output: workspace_path = os.path.join(options.generator_output, workspace_path) try: os.makedirs(workspace_path) except OSError, e: if e.errno != errno.EEXIST: raise output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \ '<Workspace version = "1.0">\n' for gyp_name in [main_gyp, sources_gyp]: name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj' name = xml.sax.saxutils.quoteattr("group:" + name) output_string += ' <FileRef location = %s></FileRef>\n' % name output_string += '</Workspace>\n' workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata") try: with open(workspace_file, 'r') as input_file: input_string = input_file.read() if input_string == output_string: return except IOError: # Ignore errors if the file doesn't exist. pass with open(workspace_file, 'w') as output_file: output_file.write(output_string) def _TargetFromSpec(old_spec, params): """ Create fake target for xcode-ninja wrapper. """ # Determine ninja top level build dir (e.g. /path/to/out). 
ninja_toplevel = None jobs = 0 if params: options = params['options'] ninja_toplevel = \ os.path.join(options.toplevel_dir, gyp.generator.ninja.ComputeOutputDir(params)) jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0) target_name = old_spec.get('target_name') product_name = old_spec.get('product_name', target_name) product_extension = old_spec.get('product_extension') ninja_target = {} ninja_target['target_name'] = target_name ninja_target['product_name'] = product_name if product_extension: ninja_target['product_extension'] = product_extension ninja_target['toolset'] = old_spec.get('toolset') ninja_target['default_configuration'] = old_spec.get('default_configuration') ninja_target['configurations'] = {} # Tell Xcode to look in |ninja_toplevel| for build products. new_xcode_settings = {} if ninja_toplevel: new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \ "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel if 'configurations' in old_spec: for config in old_spec['configurations'].iterkeys(): old_xcode_settings = \ old_spec['configurations'][config].get('xcode_settings', {}) if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings: new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO" new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \ old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] ninja_target['configurations'][config] = {} ninja_target['configurations'][config]['xcode_settings'] = \ new_xcode_settings ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0) ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0) ninja_target['ios_watchkit_extension'] = \ old_spec.get('ios_watchkit_extension', 0) ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0) ninja_target['type'] = old_spec['type'] if ninja_toplevel: ninja_target['actions'] = [ { 'action_name': 'Compile and copy %s via ninja' % target_name, 'inputs': [], 'outputs': [], 'action': [ 'env', 'PATH=%s' % os.environ['PATH'], 'ninja', '-C', 
new_xcode_settings['CONFIGURATION_BUILD_DIR'], target_name, ], 'message': 'Compile and copy %s via ninja' % target_name, }, ] if jobs > 0: ninja_target['actions'][0]['action'].extend(('-j', jobs)) return ninja_target def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): """Limit targets for Xcode wrapper. Xcode sometimes performs poorly with too many targets, so only include proper executable targets, with filters to customize. Arguments: target_extras: Regular expression to always add, matching any target. executable_target_pattern: Regular expression limiting executable targets. spec: Specifications for target. """ target_name = spec.get('target_name') # Always include targets matching target_extras. if target_extras is not None and re.search(target_extras, target_name): return True # Otherwise just show executable targets. if spec.get('type', '') == 'executable' and \ spec.get('product_extension', '') != 'bundle': # If there is a filter and the target does not match, exclude the target. if executable_target_pattern is not None: if not re.search(executable_target_pattern, target_name): return False return True return False def CreateWrapper(target_list, target_dicts, data, params): """Initialize targets for the ninja wrapper. This sets up the necessary variables in the targets to generate Xcode projects that use ninja as an external builder. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. data: Dict of flattened build files keyed on gyp path. params: Dict of global options for gyp. """ orig_gyp = params['build_files'][0] for gyp_name, gyp_dict in data.iteritems(): if gyp_name == orig_gyp: depth = gyp_dict['_DEPTH'] # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE # and prepend .ninja before the .gyp extension. 
generator_flags = params.get('generator_flags', {}) main_gyp = generator_flags.get('xcode_ninja_main_gyp', None) if main_gyp is None: (build_file_root, build_file_ext) = os.path.splitext(orig_gyp) main_gyp = build_file_root + ".ninja" + build_file_ext # Create new |target_list|, |target_dicts| and |data| data structures. new_target_list = [] new_target_dicts = {} new_data = {} # Set base keys needed for |data|. new_data[main_gyp] = {} new_data[main_gyp]['included_files'] = [] new_data[main_gyp]['targets'] = [] new_data[main_gyp]['xcode_settings'] = \ data[orig_gyp].get('xcode_settings', {}) # Normally the xcode-ninja generator includes only valid executable targets. # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to # executable targets that match the pattern. (Default all) executable_target_pattern = \ generator_flags.get('xcode_ninja_executable_target_pattern', None) # For including other non-executable targets, add the matching target name # to the |xcode_ninja_target_pattern| regular expression. (Default none) target_extras = generator_flags.get('xcode_ninja_target_pattern', None) for old_qualified_target in target_list: spec = target_dicts[old_qualified_target] if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): # Add to new_target_list. target_name = spec.get('target_name') new_target_name = '%s:%s#target' % (main_gyp, target_name) new_target_list.append(new_target_name) # Add to new_target_dicts. new_target_dicts[new_target_name] = _TargetFromSpec(spec, params) # Add to new_data. for old_target in data[old_qualified_target.split(':')[0]]['targets']: if old_target['target_name'] == target_name: new_data_target = {} new_data_target['target_name'] = old_target['target_name'] new_data_target['toolset'] = old_target['toolset'] new_data[main_gyp]['targets'].append(new_data_target) # Create sources target. 
sources_target_name = 'sources_for_indexing' sources_target = _TargetFromSpec( { 'target_name' : sources_target_name, 'toolset': 'target', 'default_configuration': 'Default', 'mac_bundle': '0', 'type': 'executable' }, None) # Tell Xcode to look everywhere for headers. sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } } sources = [] for target, target_dict in target_dicts.iteritems(): base = os.path.dirname(target) files = target_dict.get('sources', []) + \ target_dict.get('mac_bundle_resources', []) for action in target_dict.get('actions', []): files.extend(action.get('inputs', [])) # Remove files starting with $. These are mostly intermediate files for the # build system. files = [ file for file in files if not file.startswith('$')] # Make sources relative to root build file. relative_path = os.path.dirname(main_gyp) sources += [ os.path.relpath(os.path.join(base, file), relative_path) for file in files ] sources_target['sources'] = sorted(set(sources)) # Put sources_to_index in it's own gyp. sources_gyp = \ os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp") fully_qualified_target_name = \ '%s:%s#target' % (sources_gyp, sources_target_name) # Add to new_target_list, new_target_dicts and new_data. new_target_list.append(fully_qualified_target_name) new_target_dicts[fully_qualified_target_name] = sources_target new_data_target = {} new_data_target['target_name'] = sources_target['target_name'] new_data_target['_DEPTH'] = depth new_data_target['toolset'] = "target" new_data[sources_gyp] = {} new_data[sources_gyp]['targets'] = [] new_data[sources_gyp]['included_files'] = [] new_data[sources_gyp]['xcode_settings'] = \ data[orig_gyp].get('xcode_settings', {}) new_data[sources_gyp]['targets'].append(new_data_target) # Write workspace to file. _WriteWorkspace(main_gyp, sources_gyp, params) return (new_target_list, new_target_dicts, new_data)
mit
GoogleCloudPlatform/professional-services
examples/cloudml-energy-price-forecasting/data_preparation/data_prep.py
2
9966
#!/usr/bin/env python #Copyright 2018 Google LLC # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. # ============================================================================== """Prepares the data for Machine Learning. Uses the raw data in BigQuery to create a training, validation and test tables. Computes the mean and standard deviation for the weather features and saves them for later use by the TensorFlow model. Typical usage example: python -m data_preparation.data_prep """ import argparse import pickle from google.cloud import bigquery import numpy as np from tensorflow.python.lib.io import file_io from constants import constants def initialise_params(): """Parses all arguments and assigns default values when missing. Convert argument strings to objects and assign them as attributes of the namespace. Returns: An object containing all the parsed arguments for script to use. 
""" args_parser = argparse.ArgumentParser() args_parser.add_argument( '--dataset', help='Dataset name.', default='Energy' ) args_parser.add_argument( '--train_table', help='Name of the output BigQuery table containing training data.', default='MLDataTrain' ) args_parser.add_argument( '--valid_table', help='Name of the output BigQuery table containing validation data.', default='MLDataValid' ) args_parser.add_argument( '--test_table', help='Name of the output BigQuery table containing test data.', default='MLDataTest' ) args_parser.add_argument( '--prepare_data_file', help='Path to prepare_data sql file.', default='data_preparation/prepare_data.sql' ) args_parser.add_argument( '--weather_mean_std_file', help='Path to weather_mean_std sql file.', default='data_preparation/weather_mean_std.sql' ) args_parser.add_argument( '--train_from_date', help='Starting date for training data.', default='2015-01-05 00:00:00' ) args_parser.add_argument( '--train_to_date', help='End date for training data.', default='2015-10-04 23:01:00' ) args_parser.add_argument( '--valid_from_date', help='Starting date for validation data.', default='2015-10-05 00:00:00' ) args_parser.add_argument( '--valid_to_date', help='End date for validation data.', default='2015-10-11 23:01:00' ) args_parser.add_argument( '--test_from_date', help='Starting date for testing data.', default='2015-10-12 00:00:00' ) args_parser.add_argument( '--test_to_date', help='End date for testing data.', default='2015-10-18 23:01:00' ) args_parser.add_argument( '--price_scaling', help='Fraction used to scale energy prices.', default=0.01 ) args_parser.add_argument( '--mean_path', help='Output path for feature means.', default='gs://energyforecast/data/pickle/mean.pkl' ) args_parser.add_argument( '--std_path', help='Output path for feature standard deviations.', default='gs://energyforecast/data/pickle/std.pkl' ) return args_parser.parse_args() def run_query(client, query, job_config): """Runs specified SQL query in 
BigQuery. Args: client: `google.cloud.bigquery.client.Client` instance. query: String containing SQL query. job_configuration: `QueryJobConfig` instance. Returns: Result from running the query in BigQuery. """ query_job = client.query( query, job_config=job_config) return query_job.result() def scalar_extraction_query(inner_query): """Generates scalar extraction query. Extracts all values from array columns into scalars. Joins the energy hourly prices with the weather data, computes the hourly price distributions for the previous week, and extracts all scalars from the distribution and weather array columns. Args: inner_query: Query to join energy prices with past price distribution and with weather data. Returns: String with query wrapping the inner_query to extract scalar columns. """ distribution_cols = ['distribution[OFFSET(' + str(i) + ')] distribution' + str(i) for i in range(constants.DISTRIBUTION_SIZE)] weather_cols = ['weather[OFFSET(' + str(i) + ')] weather' + str(i) for i in range(constants.WEATHER_SIZE)] combined_cols = ', '.join(distribution_cols + weather_cols) with_statement = 'WITH Feature_Temp AS (' + inner_query + ')' select_statement = 'SELECT price, date_utc, day, hour, ' + combined_cols from_statement = 'FROM Feature_Temp' query = ' '.join([with_statement, select_statement, from_statement]) return query def create_table( from_date, to_date, table_name, query_file, dataset, price_scaling, client): """Creates training, validation, and test tables. Specifies parameters to be passed to the SQL query, specifies name for the new table being created, generates a dynamic query and executes the query. Args: from_date: Intial date for table's data. to_date: Final date for table's data. table_name: Name for table. query_file: Path to file containing the SQL query. dataset: `BigQuery` `Dataset` in which to save the table. price_scaling: Float used to scale (multiply with) the labels (price) for scaling purposes. 
Given the initialization schemes and normalized inputs, the expected values for the outputs will be close to 0. This means that by scaling the labels you will not be too far off from the start, which helps convergence. If a target is too big, the mean squared error will be huge which means your gradients will also be huge and could lead to numerical instability. client: `google.cloud.bigquery.client.Client` instance. """ query_params = [ bigquery.ScalarQueryParameter( 'from_date', 'STRING', from_date), bigquery.ScalarQueryParameter( 'to_date', 'STRING', to_date), bigquery.ScalarQueryParameter( 'price_scaling', 'FLOAT64', price_scaling)] table_ref = client.dataset( dataset).table( table_name) job_config = bigquery.QueryJobConfig() job_config.query_parameters = query_params job_config.destination = table_ref with open(query_file, 'r') as myfile: inner_query = myfile.read() run_query( client, scalar_extraction_query(inner_query), job_config) def generate_data(client, parameters): """Creates training, validation, and test tables for ML model. Args: client: `google.cloud.bigquery.client.Client` instance. parameters: Parameters passed to script. """ create_table( parameters.train_from_date, parameters.train_to_date, parameters.train_table, parameters.prepare_data_file, parameters.dataset, parameters.price_scaling, client) create_table( parameters.valid_from_date, parameters.valid_to_date, parameters.valid_table, parameters.prepare_data_file, parameters.dataset, parameters.price_scaling, client) create_table( parameters.test_from_date, parameters.test_to_date, parameters.test_table, parameters.prepare_data_file, parameters.dataset, parameters.price_scaling, client) def generate_mean_std(client, parameters): """Computes mean and standard deviation. Runs BigQuery query to compute mean and standard deviation of all weather features and saves the results to storage. Args: client: `google.cloud.bigquery.client.Client` instance. parameters: Parameters passed to script. 
""" query_params = [ bigquery.ScalarQueryParameter( 'train_from_date', 'STRING', parameters.train_from_date), bigquery.ScalarQueryParameter( 'train_to_date', 'STRING', parameters.train_to_date)] job_config = bigquery.QueryJobConfig() job_config.query_parameters = query_params with open(parameters.weather_mean_std_file, 'r') as myfile: query = myfile.read() results = run_query( client, query, job_config) for row in results: mean = np.array(row.mean, dtype=np.float32) std = np.array(row.std, dtype=np.float32) with file_io.FileIO( parameters.mean_path, mode='wb+' ) as f: pickle.dump(mean, f, protocol=2) with file_io.FileIO( parameters.std_path, mode='wb+' ) as f: pickle.dump(std, f, protocol=2) def main(): """Main function to be run when executing script.""" parameters = initialise_params() client = bigquery.Client() generate_data(client, parameters) generate_mean_std(client, parameters) if __name__ == '__main__': main()
apache-2.0
nagyistoce/edx-platform
common/djangoapps/student/tests/test_recent_enrollments.py
63
7937
""" Tests for the recently enrolled messaging within the Dashboard. """ import datetime from django.conf import settings from django.core.urlresolvers import reverse from opaque_keys.edx import locator from pytz import UTC import unittest import ddt from shoppingcart.models import DonationConfiguration from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from course_modes.tests.factories import CourseModeFactory from student.models import CourseEnrollment, DashboardConfiguration from student.views import get_course_enrollments, _get_recently_enrolled_courses # pylint: disable=protected-access @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms') @ddt.ddt class TestRecentEnrollments(ModuleStoreTestCase): """ Unit tests for getting the list of courses for a logged in user """ PASSWORD = 'test' def setUp(self): """ Add a student """ super(TestRecentEnrollments, self).setUp() self.student = UserFactory() self.student.set_password(self.PASSWORD) self.student.save() # Old Course old_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0') course, enrollment = self._create_course_and_enrollment(old_course_location) enrollment.created = datetime.datetime(1900, 12, 31, 0, 0, 0, 0) enrollment.save() # New Course course_location = locator.CourseLocator('Org1', 'Course1', 'Run1') self.course, self.enrollment = self._create_course_and_enrollment(course_location) def _create_course_and_enrollment(self, course_location): """ Creates a course and associated enrollment. """ course = CourseFactory.create( org=course_location.org, number=course_location.course, run=course_location.run ) enrollment = CourseEnrollment.enroll(self.student, course.id) return course, enrollment def _configure_message_timeout(self, timeout): """Configure the amount of time the enrollment message will be displayed. 
""" config = DashboardConfiguration(recent_enrollment_time_delta=timeout) config.save() def test_recently_enrolled_courses(self): """ Test if the function for filtering recent enrollments works appropriately. """ self._configure_message_timeout(60) # get courses through iterating all courses courses_list = list(get_course_enrollments(self.student, None, [])) self.assertEqual(len(courses_list), 2) recent_course_list = _get_recently_enrolled_courses(courses_list) self.assertEqual(len(recent_course_list), 1) def test_zero_second_delta(self): """ Tests that the recent enrollment list is empty if configured to zero seconds. """ self._configure_message_timeout(0) courses_list = list(get_course_enrollments(self.student, None, [])) self.assertEqual(len(courses_list), 2) recent_course_list = _get_recently_enrolled_courses(courses_list) self.assertEqual(len(recent_course_list), 0) def test_enrollments_sorted_most_recent(self): """ Test that the list of newly created courses are properly sorted to show the most recent enrollments first. 
""" self._configure_message_timeout(600) # Create a number of new enrollments and courses, and force their creation behind # the first enrollment courses = [] for idx, seconds_past in zip(range(2, 6), [5, 10, 15, 20]): course_location = locator.CourseLocator( 'Org{num}'.format(num=idx), 'Course{num}'.format(num=idx), 'Run{num}'.format(num=idx) ) course, enrollment = self._create_course_and_enrollment(course_location) enrollment.created = datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds_past) enrollment.save() courses.append(course) courses_list = list(get_course_enrollments(self.student, None, [])) self.assertEqual(len(courses_list), 6) recent_course_list = _get_recently_enrolled_courses(courses_list) self.assertEqual(len(recent_course_list), 5) self.assertEqual(recent_course_list[1].course.id, courses[0].id) self.assertEqual(recent_course_list[2].course.id, courses[1].id) self.assertEqual(recent_course_list[3].course.id, courses[2].id) self.assertEqual(recent_course_list[4].course.id, courses[3].id) def test_dashboard_rendering(self): """ Tests that the dashboard renders the recent enrollment messages appropriately. 
""" self._configure_message_timeout(600) self.client.login(username=self.student.username, password=self.PASSWORD) response = self.client.get(reverse("dashboard")) self.assertContains(response, "Thank you for enrolling in") @ddt.data( #Register as an honor in any course modes with no payment option ([('audit', 0), ('honor', 0)], 'honor', True), ([('honor', 0)], 'honor', True), ([], 'honor', True), #Register as an honor in any course modes which has payment option ([('honor', 10)], 'honor', False), # This is a paid course ([('audit', 0), ('honor', 0), ('professional', 20)], 'honor', True), ([('audit', 0), ('honor', 0), ('verified', 20)], 'honor', True), ([('audit', 0), ('honor', 0), ('verified', 20), ('professional', 20)], 'honor', True), ([], 'honor', True), #Register as an audit in any course modes with no payment option ([('audit', 0), ('honor', 0)], 'audit', True), ([('audit', 0)], 'audit', True), #Register as an audit in any course modes which has no payment option ([('audit', 0), ('honor', 0), ('verified', 10)], 'audit', True), #Register as a verified in any course modes which has payment option ([('professional', 20)], 'professional', False), ([('verified', 20)], 'verified', False), ([('professional', 20), ('verified', 20)], 'verified', False), ([('audit', 0), ('honor', 0), ('verified', 20)], 'verified', False) ) @ddt.unpack def test_donate_button(self, course_modes, enrollment_mode, show_donate): # Enable the enrollment success message self._configure_message_timeout(10000) # Enable donations DonationConfiguration(enabled=True).save() # Create the course mode(s) for mode, min_price in course_modes: CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=min_price) self.enrollment.mode = enrollment_mode self.enrollment.save() # Check that the donate button is or is not displayed self.client.login(username=self.student.username, password=self.PASSWORD) response = self.client.get(reverse("dashboard")) if show_donate: self.assertContains(response, 
"donate-container") else: self.assertNotContains(response, "donate-container") def test_donate_button_honor_with_price(self): # Enable the enrollment success message and donations self._configure_message_timeout(10000) DonationConfiguration(enabled=True).save() # Create a white-label course mode # (honor mode with a price set) CourseModeFactory(mode_slug="honor", course_id=self.course.id, min_price=100) # Check that the donate button is NOT displayed self.client.login(username=self.student.username, password=self.PASSWORD) response = self.client.get(reverse("dashboard")) self.assertNotContains(response, "donate-container")
agpl-3.0
onitake/ansible
test/units/modules/network/f5/test_bigip_snmp_trap.py
21
6376
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_snmp_trap import V2Parameters from library.modules.bigip_snmp_trap import V1Parameters from library.modules.bigip_snmp_trap import ModuleManager from library.modules.bigip_snmp_trap import V2Manager from library.modules.bigip_snmp_trap import V1Manager from library.modules.bigip_snmp_trap import ArgumentSpec # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.compat.mock import patch from test.units.compat.mock import DEFAULT from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigip_snmp_trap import V2Parameters from ansible.modules.network.f5.bigip_snmp_trap import V1Parameters from ansible.modules.network.f5.bigip_snmp_trap import ModuleManager from ansible.modules.network.f5.bigip_snmp_trap import V2Manager from ansible.modules.network.f5.bigip_snmp_trap import V1Manager from ansible.modules.network.f5.bigip_snmp_trap import ArgumentSpec # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.compat.mock import patch from units.compat.mock import DEFAULT from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] 
= data return data class TestParameters(unittest.TestCase): def test_module_networked_parameters(self): args = dict( name='foo', snmp_version='1', community='public', destination='10.10.10.10', port=1000, network='other', password='password', server='localhost', user='admin' ) p = V2Parameters(params=args) assert p.name == 'foo' assert p.snmp_version == '1' assert p.community == 'public' assert p.destination == '10.10.10.10' assert p.port == 1000 assert p.network == 'other' def test_module_non_networked_parameters(self): args = dict( name='foo', snmp_version='1', community='public', destination='10.10.10.10', port=1000, network='other', password='password', server='localhost', user='admin' ) p = V1Parameters(params=args) assert p.name == 'foo' assert p.snmp_version == '1' assert p.community == 'public' assert p.destination == '10.10.10.10' assert p.port == 1000 assert p.network is None def test_api_parameters(self): args = dict( name='foo', community='public', host='10.10.10.10', network='other', version=1, port=1000 ) p = V2Parameters(params=args) assert p.name == 'foo' assert p.snmp_version == '1' assert p.community == 'public' assert p.destination == '10.10.10.10' assert p.port == 1000 assert p.network == 'other' class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_trap(self, *args): set_module_args(dict( name='foo', snmp_version='1', community='public', destination='10.10.10.10', port=1000, network='other', password='password', server='localhost', user='admin' )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods to force specific logic in the module to happen m0 = ModuleManager(module=module) m0.is_version_without_network = Mock(return_value=False) m0.is_version_with_default_network = Mock(return_value=True) patches = dict( create_on_device=DEFAULT, exists=DEFAULT ) with patch.multiple(V2Manager, **patches) as mo: 
mo['create_on_device'].side_effect = Mock(return_value=True) mo['exists'].side_effect = Mock(return_value=False) results = m0.exec_module() assert results['changed'] is True assert results['port'] == 1000 assert results['snmp_version'] == '1' def test_create_trap_non_network(self, *args): set_module_args(dict( name='foo', snmp_version='1', community='public', destination='10.10.10.10', port=1000, password='password', server='localhost', user='admin' )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode ) # Override methods to force specific logic in the module to happen m0 = ModuleManager(module=module) m0.is_version_without_network = Mock(return_value=True) patches = dict( create_on_device=DEFAULT, exists=DEFAULT ) with patch.multiple(V1Manager, **patches) as mo: mo['create_on_device'].side_effect = Mock(return_value=True) mo['exists'].side_effect = Mock(return_value=False) results = m0.exec_module() assert results['changed'] is True assert results['port'] == 1000 assert results['snmp_version'] == '1'
gpl-3.0
ljgabc/lfs
usr/lib/python2.7/code.py
256
10189
"""Utilities needed to emulate Python's interactive interpreter. """ # Inspired by similar code by Jeff Epler and Fredrik Lundh. import sys import traceback from codeop import CommandCompiler, compile_command __all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", "compile_command"] def softspace(file, newvalue): oldvalue = 0 try: oldvalue = file.softspace except AttributeError: pass try: file.softspace = newvalue except (AttributeError, TypeError): # "attribute-less object" or "read-only attributes" pass return oldvalue class InteractiveInterpreter: """Base class for InteractiveConsole. This class deals with parsing and interpreter state (the user's namespace); it doesn't deal with input buffering or prompting or input file naming (the filename is always passed in explicitly). """ def __init__(self, locals=None): """Constructor. The optional 'locals' argument specifies the dictionary in which code will be executed; it defaults to a newly created dictionary with key "__name__" set to "__console__" and key "__doc__" set to None. """ if locals is None: locals = {"__name__": "__console__", "__doc__": None} self.locals = locals self.compile = CommandCompiler() def runsource(self, source, filename="<input>", symbol="single"): """Compile and run some source in the interpreter. Arguments are as for compile_command(). One several things can happen: 1) The input is incorrect; compile_command() raised an exception (SyntaxError or OverflowError). A syntax traceback will be printed by calling the showsyntaxerror() method. 2) The input is incomplete, and more input is required; compile_command() returned None. Nothing happens. 3) The input is complete; compile_command() returned a code object. The code is executed by calling self.runcode() (which also handles run-time exceptions, except for SystemExit). The return value is True in case 2, False in the other cases (unless an exception is raised). 
The return value can be used to decide whether to use sys.ps1 or sys.ps2 to prompt the next line. """ try: code = self.compile(source, filename, symbol) except (OverflowError, SyntaxError, ValueError): # Case 1 self.showsyntaxerror(filename) return False if code is None: # Case 2 return True # Case 3 self.runcode(code) return False def runcode(self, code): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: exec code in self.locals except SystemExit: raise except: self.showtraceback() else: if softspace(sys.stdout, 0): print def showsyntaxerror(self, filename=None): """Display the syntax error that just occurred. This doesn't display a stack trace because there isn't one. If a filename is given, it is stuffed in the exception instead of what was there before (because Python's parser always uses "<string>" when reading from a string). The output is written by self.write(), below. """ type, value, sys.last_traceback = sys.exc_info() sys.last_type = type sys.last_value = value if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: msg, (dummy_filename, lineno, offset, line) = value except: # Not the format we expect; leave it alone pass else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) sys.last_value = value list = traceback.format_exception_only(type, value) map(self.write, list) def showtraceback(self): """Display the exception that just occurred. We remove the first stack item because it is our own code. The output is written by self.write(), below. 
""" try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] list = traceback.format_list(tblist) if list: list.insert(0, "Traceback (most recent call last):\n") list[len(list):] = traceback.format_exception_only(type, value) finally: tblist = tb = None map(self.write, list) def write(self, data): """Write a string. The base implementation writes to sys.stderr; a subclass may replace this with a different implementation. """ sys.stderr.write(data) class InteractiveConsole(InteractiveInterpreter): """Closely emulate the behavior of the interactive Python interpreter. This class builds on InteractiveInterpreter and adds prompting using the familiar sys.ps1 and sys.ps2, and input buffering. """ def __init__(self, locals=None, filename="<console>"): """Constructor. The optional locals argument will be passed to the InteractiveInterpreter base class. The optional filename argument should specify the (file)name of the input stream; it will show up in tracebacks. """ InteractiveInterpreter.__init__(self, locals) self.filename = filename self.resetbuffer() def resetbuffer(self): """Reset the input buffer.""" self.buffer = [] def interact(self, banner=None): """Closely emulate the interactive Python console. The optional banner argument specify the banner to print before the first interaction; by default it prints a banner similar to the one printed by the real Python interpreter, followed by the current class name in parentheses (so as not to confuse this with the real interpreter -- since it's so close!). """ try: sys.ps1 except AttributeError: sys.ps1 = ">>> " try: sys.ps2 except AttributeError: sys.ps2 = "... " cprt = 'Type "help", "copyright", "credits" or "license" for more information.' 
if banner is None: self.write("Python %s on %s\n%s\n(%s)\n" % (sys.version, sys.platform, cprt, self.__class__.__name__)) else: self.write("%s\n" % str(banner)) more = 0 while 1: try: if more: prompt = sys.ps2 else: prompt = sys.ps1 try: line = self.raw_input(prompt) # Can be None if sys.stdin was redefined encoding = getattr(sys.stdin, "encoding", None) if encoding and not isinstance(line, unicode): line = line.decode(encoding) except EOFError: self.write("\n") break else: more = self.push(line) except KeyboardInterrupt: self.write("\nKeyboardInterrupt\n") self.resetbuffer() more = 0 def push(self, line): """Push a line to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter's runsource() method is called with the concatenated contents of the buffer as source. If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is 1 if more input is required, 0 if the line was dealt with in some way (this is the same as runsource()). """ self.buffer.append(line) source = "\n".join(self.buffer) more = self.runsource(source, self.filename) if not more: self.resetbuffer() return more def raw_input(self, prompt=""): """Write a prompt and read a line. The returned line does not include the trailing newline. When the user enters the EOF key sequence, EOFError is raised. The base implementation uses the built-in function raw_input(); a subclass may replace this with a different implementation. """ return raw_input(prompt) def interact(banner=None, readfunc=None, local=None): """Closely emulate the interactive Python interpreter. This is a backwards compatible interface to the InteractiveConsole class. When readfunc is not specified, it attempts to import the readline module to enable GNU readline if it is available. 
Arguments (all optional, all default to None): banner -- passed to InteractiveConsole.interact() readfunc -- if not None, replaces InteractiveConsole.raw_input() local -- passed to InteractiveInterpreter.__init__() """ console = InteractiveConsole(local) if readfunc is not None: console.raw_input = readfunc else: try: import readline except ImportError: pass console.interact(banner) if __name__ == "__main__": interact()
gpl-2.0
ar4s/django
django/db/models/sql/where.py
8
16821
""" Code to manage the creation and SQL rendering of 'where' constraints. """ import collections import datetime from itertools import repeat from django.conf import settings from django.db.models.fields import DateTimeField, Field from django.db.models.sql.datastructures import EmptyResultSet, Empty from django.db.models.sql.aggregates import Aggregate from django.utils.six.moves import xrange from django.utils import timezone from django.utils import tree # Connection types AND = 'AND' OR = 'OR' class EmptyShortCircuit(Exception): """ Internal exception used to indicate that a "matches nothing" node should be added to the where-clause. """ pass class WhereNode(tree.Node): """ Used to represent the SQL where-clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually a tuple of: (Constraint(alias, targetcol, field), lookup_type, value) where value can be either raw Python value, or Query, ExpressionNode or something else knowing how to turn itself into SQL. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods. The second alternative should be used if the alias is not the only mutable variable. """ default = AND def _prepare_data(self, data): """ Prepare data for addition to the tree. If the data is a list or tuple, it is expected to be of the form (obj, lookup_type, value), where obj is a Constraint object, and is then slightly munged before being stored (to avoid storing any reference to field objects). Otherwise, the 'data' is stored unchanged and can be any class with an 'as_sql()' method. """ if not isinstance(data, (list, tuple)): return data obj, lookup_type, value = data if isinstance(value, collections.Iterator): # Consume any generators immediately, so that we can determine # emptiness and transform any non-empty values correctly. 
value = list(value) # The "value_annotation" parameter is used to pass auxilliary information # about the value(s) to the query construction. Specifically, datetime # and empty values need special handling. Other types could be used # here in the future (using Python types is suggested for consistency). if (isinstance(value, datetime.datetime) or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')): value_annotation = datetime.datetime elif hasattr(value, 'value_annotation'): value_annotation = value.value_annotation else: value_annotation = bool(value) if hasattr(obj, "prepare"): value = obj.prepare(lookup_type, value) return (obj, lookup_type, value_annotation, value) def as_sql(self, qn, connection): """ Returns the SQL version of the where clause and the value to be substituted in. Returns '', [] if this node matches everything, None, [] if this node is empty, and raises EmptyResultSet if this node can't match anything. """ # Note that the logic here is made slightly more complex than # necessary because there are two kind of empty nodes: Nodes # containing 0 children, and nodes that are known to match everything. # A match-everything node is different than empty node (which also # technically matches everything) for backwards compatibility reasons. # Refs #5261. result = [] result_params = [] everything_childs, nothing_childs = 0, 0 non_empty_childs = len(self.children) for child in self.children: try: if hasattr(child, 'as_sql'): sql, params = child.as_sql(qn=qn, connection=connection) else: # A leaf node in the tree. sql, params = self.make_atom(child, qn, connection) except EmptyResultSet: nothing_childs += 1 else: if sql: result.append(sql) result_params.extend(params) else: if sql is None: # Skip empty childs totally. non_empty_childs -= 1 continue everything_childs += 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. 
if self.connector == AND: full_needed, empty_needed = non_empty_childs, 1 else: full_needed, empty_needed = 1, non_empty_childs # Now, check if this node is full/empty using the # counts. if empty_needed - nothing_childs <= 0: if self.negated: return '', [] else: raise EmptyResultSet if full_needed - everything_childs <= 0: if self.negated: raise EmptyResultSet else: return '', [] if non_empty_childs == 0: # All the child nodes were empty, so this one is empty, too. return None, [] conn = ' %s ' % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. sql_string = 'NOT (%s)' % sql_string elif len(result) > 1: sql_string = '(%s)' % sql_string return sql_string, result_params def get_cols(self): cols = [] for child in self.children: if hasattr(child, 'get_cols'): cols.extend(child.get_cols()) else: if isinstance(child[0], Constraint): cols.append((child[0].alias, child[0].col)) if hasattr(child[3], 'get_cols'): cols.extend(child[3].get_cols()) return cols def make_atom(self, child, qn, connection): """ Turn a tuple (Constraint(table_alias, column_name, db_type), lookup_type, value_annotation, params) into valid SQL. The first item of the tuple may also be an Aggregate. Returns the string for the SQL fragment and the parameters to use for it. 
""" lvalue, lookup_type, value_annotation, params_or_value = child field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None if isinstance(lvalue, Constraint): try: lvalue, params = lvalue.process(lookup_type, params_or_value, connection) except EmptyShortCircuit: raise EmptyResultSet elif isinstance(lvalue, Aggregate): params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection) else: raise TypeError("'make_atom' expects a Constraint or an Aggregate " "as the first item of its 'child' argument.") if isinstance(lvalue, tuple): # A direct database column lookup. field_sql, field_params = self.sql_for_columns(lvalue, qn, connection, field_internal_type), [] else: # A smart object with an as_sql() method. field_sql, field_params = lvalue.as_sql(qn, connection) is_datetime_field = value_annotation is datetime.datetime cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s' if hasattr(params, 'as_sql'): extra, params = params.as_sql(qn, connection) cast_sql = '' else: extra = '' params = field_params + params if (len(params) == 1 and params[0] == '' and lookup_type == 'exact' and connection.features.interprets_empty_strings_as_nulls): lookup_type = 'isnull' value_annotation = True if lookup_type in connection.operators: format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),) return (format % (field_sql, connection.operators[lookup_type] % cast_sql, extra), params) if lookup_type == 'in': if not value_annotation: raise EmptyResultSet if extra: return ('%s IN %s' % (field_sql, extra), params) max_in_list_size = connection.ops.max_in_list_size() if max_in_list_size and len(params) > max_in_list_size: # Break up the params list into an OR of manageable chunks. 
in_clause_elements = ['('] for offset in xrange(0, len(params), max_in_list_size): if offset > 0: in_clause_elements.append(' OR ') in_clause_elements.append('%s IN (' % field_sql) group_size = min(len(params) - offset, max_in_list_size) param_group = ', '.join(repeat('%s', group_size)) in_clause_elements.append(param_group) in_clause_elements.append(')') in_clause_elements.append(')') return ''.join(in_clause_elements), params else: return ('%s IN (%s)' % (field_sql, ', '.join(repeat('%s', len(params)))), params) elif lookup_type in ('range', 'year'): return ('%s BETWEEN %%s and %%s' % field_sql, params) elif is_datetime_field and lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'): tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname) return ('%s = %%s' % sql, tz_params + params) elif lookup_type in ('month', 'day', 'week_day'): return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type, field_sql), params) elif lookup_type == 'isnull': assert value_annotation in (True, False), "Invalid value_annotation for isnull" return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ()) elif lookup_type == 'search': return (connection.ops.fulltext_search_sql(field_sql), params) elif lookup_type in ('regex', 'iregex'): return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params raise TypeError('Invalid lookup_type: %r' % lookup_type) def sql_for_columns(self, data, qn, connection, internal_type=None): """ Returns the SQL fragment used for the left-hand side of a column constraint (for example, the "T1.foo" portion in the clause "WHERE ... T1.foo = 6") and a list of parameters. 
""" table_alias, name, db_type = data if table_alias: lhs = '%s.%s' % (qn(table_alias), qn(name)) else: lhs = qn(name) return connection.ops.field_cast_sql(db_type, internal_type) % lhs def relabel_aliases(self, change_map): """ Relabels the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. """ for pos, child in enumerate(self.children): if hasattr(child, 'relabel_aliases'): # For example another WhereNode child.relabel_aliases(change_map) elif isinstance(child, (list, tuple)): # tuple starting with Constraint child = (child[0].relabeled_clone(change_map),) + child[1:] if hasattr(child[3], 'relabeled_clone'): child = (child[0], child[1], child[2]) + ( child[3].relabeled_clone(change_map),) self.children[pos] = child def clone(self): """ Creates a clone of the tree. Must only be called on root nodes (nodes with empty subtree_parents). Childs must be either (Contraint, lookup, value) tuples, or objects supporting .clone(). """ clone = self.__class__._new_instance( children=[], connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, 'clone'): clone.children.append(child.clone()) else: clone.children.append(child) return clone class EmptyWhere(WhereNode): def add(self, data, connector): return def as_sql(self, qn=None, connection=None): raise EmptyResultSet class EverythingNode(object): """ A node that matches everything. """ def as_sql(self, qn=None, connection=None): return '', [] class NothingNode(object): """ A node that matches nothing. 
""" def as_sql(self, qn=None, connection=None): raise EmptyResultSet class ExtraWhere(object): def __init__(self, sqls, params): self.sqls = sqls self.params = params def as_sql(self, qn=None, connection=None): sqls = ["(%s)" % sql for sql in self.sqls] return " AND ".join(sqls), list(self.params or ()) class Constraint(object): """ An object that can be passed to WhereNode.add() and knows how to pre-process itself prior to including in the WhereNode. """ def __init__(self, alias, col, field): self.alias, self.col, self.field = alias, col, field def prepare(self, lookup_type, value): if self.field: return self.field.get_prep_lookup(lookup_type, value) return value def process(self, lookup_type, value, connection): """ Returns a tuple of data suitable for inclusion in a WhereNode instance. """ # Because of circular imports, we need to import this here. from django.db.models.base import ObjectDoesNotExist try: if self.field: params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=True) db_type = self.field.db_type(connection=connection) else: # This branch is used at times when we add a comparison to NULL # (we don't really want to waste time looking up the associated # field object at the calling location). 
params = Field().get_db_prep_lookup(lookup_type, value, connection=connection, prepared=True) db_type = None except ObjectDoesNotExist: raise EmptyShortCircuit return (self.alias, self.col, db_type), params def relabeled_clone(self, change_map): if self.alias not in change_map: return self else: new = Empty() new.__class__ = self.__class__ new.alias, new.col, new.field = change_map[self.alias], self.col, self.field return new class SubqueryConstraint(object): def __init__(self, alias, columns, targets, query_object): self.alias = alias self.columns = columns self.targets = targets self.query_object = query_object def as_sql(self, qn, connection): query = self.query_object # QuerySet was sent if hasattr(query, 'values'): if query._db and connection.alias != query._db: raise ValueError("Can't do subqueries with queries on different DBs.") # Do not override already existing values. if not hasattr(query, 'field_names'): query = query.values(*self.targets) else: query = query._clone() query = query.query query.clear_ordering(True) query_compiler = query.get_compiler(connection=connection) return query_compiler.as_subquery_condition(self.alias, self.columns, qn) def relabel_aliases(self, change_map): self.alias = change_map.get(self.alias, self.alias) def clone(self): return self.__class__( self.alias, self.columns, self.targets, self.query_object)
bsd-3-clause
manipopopo/tensorflow
tensorflow/contrib/layers/python/layers/initializers.py
1
6214
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Weight initializers for use with layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.python.framework import dtypes from tensorflow.python.ops import random_ops __all__ = ['xavier_initializer', 'xavier_initializer_conv2d', 'variance_scaling_initializer'] def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32): """Returns an initializer performing "Xavier" initialization for weights. This function implements the weight initialization from: Xavier Glorot and Yoshua Bengio (2010): [Understanding the difficulty of training deep feedforward neural networks. International conference on artificial intelligence and statistics.]( http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf) This initializer is designed to keep the scale of the gradients roughly the same in all layers. In uniform distribution this ends up being the range: `x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard deviation of `sqrt(2. / (in + out))` is used. Args: uniform: Whether to use uniform or normal distributed random initialization. seed: A Python integer. Used to create random seeds. See `tf.set_random_seed` for behavior. dtype: The data type. 
Only floating point types are supported. Returns: An initializer for a weight matrix. """ return variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=uniform, seed=seed, dtype=dtype) xavier_initializer_conv2d = xavier_initializer def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False, seed=None, dtype=dtypes.float32): """Returns an initializer that generates tensors without scaling variance. When initializing a deep network, it is in principle advantageous to keep the scale of the input variance constant, so it does not explode or diminish by reaching the final layer. This initializer use the following formula: ```python if mode='FAN_IN': # Count only number of input connections. n = fan_in elif mode='FAN_OUT': # Count only number of output connections. n = fan_out elif mode='FAN_AVG': # Average number of inputs and output connections. n = (fan_in + fan_out)/2.0 truncated_normal(shape, 0.0, stddev=sqrt(factor / n)) ``` * To get [Delving Deep into Rectifiers]( http://arxiv.org/pdf/1502.01852v1.pdf) (also know as the "MSRA initialization"), use (Default):<br/> `factor=2.0 mode='FAN_IN' uniform=False` * To get [Convolutional Architecture for Fast Feature Embedding]( http://arxiv.org/abs/1408.5093), use:<br/> `factor=1.0 mode='FAN_IN' uniform=True` * To get [Understanding the difficulty of training deep feedforward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf), use:<br/> `factor=1.0 mode='FAN_AVG' uniform=True.` * To get `xavier_initializer` use either:<br/> `factor=1.0 mode='FAN_AVG' uniform=True`, or<br/> `factor=1.0 mode='FAN_AVG' uniform=False`. Args: factor: Float. A multiplicative factor. mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'. uniform: Whether to use uniform or normal distributed random initialization. seed: A Python integer. Used to create random seeds. See `tf.set_random_seed` for behavior. dtype: The data type. Only floating point types are supported. 
Returns: An initializer that generates tensors with unit variance. Raises: ValueError: if `dtype` is not a floating point type. TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']. """ if not dtype.is_floating: raise TypeError('Cannot create initializer for non-floating point type.') if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']: raise TypeError('Unknow mode %s [FAN_IN, FAN_OUT, FAN_AVG]', mode) # pylint: disable=unused-argument def _initializer(shape, dtype=dtype, partition_info=None): """Initializer function.""" if not dtype.is_floating: raise TypeError('Cannot create initializer for non-floating point type.') # Estimating fan_in and fan_out is not possible to do perfectly, but we try. # This is the right thing for matrix multiply and convolutions. if shape: fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1]) fan_out = float(shape[-1]) else: fan_in = 1.0 fan_out = 1.0 for dim in shape[:-2]: fan_in *= float(dim) fan_out *= float(dim) if mode == 'FAN_IN': # Count only number of input connections. n = fan_in elif mode == 'FAN_OUT': # Count only number of output connections. n = fan_out elif mode == 'FAN_AVG': # Average number of inputs and output connections. n = (fan_in + fan_out) / 2.0 if uniform: # To get stddev = math.sqrt(factor / n) need to adjust for uniform. limit = math.sqrt(3.0 * factor / n) return random_ops.random_uniform(shape, -limit, limit, dtype, seed=seed) else: # To get stddev = math.sqrt(factor / n) need to adjust for truncated. trunc_stddev = math.sqrt(1.3 * factor / n) return random_ops.truncated_normal(shape, 0.0, trunc_stddev, dtype, seed=seed) # pylint: enable=unused-argument return _initializer
apache-2.0
yvaucher/bank-payment
__unported__/account_banking_payment/model/payment_order_create.py
7
1795
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>). # (C) 2011 - 2013 Therp BV (<http://therp.nl>). # (C) 2014 ACSONE SA/NV (<http://acsone.eu>). # # All other contributions are (C) by their respective contributors # # All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import orm class payment_order_create(orm.TransientModel): _inherit = 'payment.order.create' def extend_payment_order_domain( self, cr, uid, payment_order, domain, context=None): super(payment_order_create, self).extend_payment_order_domain( cr, uid, payment_order, domain, context=context) # apply payment term filter if payment_order.mode.payment_term_ids: domain += [ ('invoice.payment_term', 'in', [term.id for term in payment_order.mode.payment_term_ids] ) ] return True
agpl-3.0
techdragon/django
django/contrib/postgres/search.py
22
7827
from django.db.models import Field, FloatField
from django.db.models.expressions import CombinedExpression, Func, Value
from django.db.models.functions import Coalesce
from django.db.models.lookups import Lookup


class SearchVectorExact(Lookup):
    """PostgreSQL ``@@`` match of a ``tsvector`` column against a query."""
    lookup_name = 'exact'

    def process_rhs(self, qn, connection):
        # A plain string on the right-hand side is promoted to a
        # SearchQuery, reusing the left-hand side's config when present.
        if not hasattr(self.rhs, 'resolve_expression'):
            config = getattr(self.lhs, 'config', None)
            self.rhs = SearchQuery(self.rhs, config=config)
        return super(SearchVectorExact, self).process_rhs(qn, connection)

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        return '%s @@ %s = true' % (lhs, rhs), lhs_params + rhs_params


class SearchVectorField(Field):
    """Model field stored as the PostgreSQL ``tsvector`` type."""

    def db_type(self, connection):
        return 'tsvector'


class SearchQueryField(Field):
    """Model field stored as the PostgreSQL ``tsquery`` type."""

    def db_type(self, connection):
        return 'tsquery'


class SearchVectorCombinable(object):
    """Mixin allowing search vectors to be concatenated with ``||``."""
    ADD = '||'

    def _combine(self, other, connector, reversed, node=None):
        # Only vectors sharing the same config may be combined.
        if not isinstance(other, SearchVectorCombinable) or not self.config == other.config:
            raise TypeError('SearchVector can only be combined with other SearchVectors')
        if reversed:
            return CombinedSearchVector(other, connector, self, self.config)
        return CombinedSearchVector(self, connector, other, self.config)


class SearchVector(SearchVectorCombinable, Func):
    """Expression wrapping ``to_tsvector()`` over one or more expressions."""
    function = 'to_tsvector'
    arg_joiner = " || ' ' || "
    _output_field = SearchVectorField()
    config = None

    def __init__(self, *expressions, **extra):
        super(SearchVector, self).__init__(*expressions, **extra)
        # Wrap every column in COALESCE so a NULL column does not wipe
        # out the whole concatenated document.
        self.source_expressions = [
            Coalesce(expression, Value(''))
            for expression in self.source_expressions
        ]
        self.config = self.extra.get('config', self.config)
        weight = self.extra.get('weight')
        if weight is not None and not hasattr(weight, 'resolve_expression'):
            weight = Value(weight)
        self.weight = weight

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        resolved = super(SearchVector, self).resolve_expression(
            query, allow_joins, reuse, summarize, for_save)
        if self.config:
            # A bare string config becomes a Value() expression so it can
            # be compiled like any other expression.
            if not hasattr(self.config, 'resolve_expression'):
                resolved.config = Value(self.config).resolve_expression(
                    query, allow_joins, reuse, summarize, for_save)
            else:
                resolved.config = self.config.resolve_expression(
                    query, allow_joins, reuse, summarize, for_save)
        return resolved

    def as_sql(self, compiler, connection, function=None, template=None):
        config_params = []
        if template is None:
            if self.config:
                config_sql, config_params = compiler.compile(self.config)
                # Escape literal % so the later template substitution
                # does not misread the config SQL.
                template = "%(function)s({}::regconfig, %(expressions)s)".format(
                    config_sql.replace('%', '%%'))
            else:
                template = self.template
        sql, params = super(SearchVector, self).as_sql(
            compiler, connection, function=function, template=template)
        extra_params = []
        if self.weight:
            weight_sql, extra_params = compiler.compile(self.weight)
            sql = 'setweight({}, {})'.format(sql, weight_sql)
        return sql, config_params + params + extra_params


class CombinedSearchVector(SearchVectorCombinable, CombinedExpression):
    """Two search vectors joined with ``||``; carries the shared config."""

    def __init__(self, lhs, connector, rhs, config, output_field=None):
        self.config = config
        super(CombinedSearchVector, self).__init__(lhs, connector, rhs, output_field)


class SearchQuery(Value):
    """Expression wrapping ``plainto_tsquery()``; supports ``&``/``|``/``~``."""
    invert = False
    _output_field = SearchQueryField()
    config = None
    BITAND = '&&'
    BITOR = '||'

    def __init__(self, value, output_field=None, **extra):
        self.config = extra.pop('config', self.config)
        self.invert = extra.pop('invert', self.invert)
        super(SearchQuery, self).__init__(value, output_field=output_field)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        resolved = super(SearchQuery, self).resolve_expression(
            query, allow_joins, reuse, summarize, for_save)
        if self.config:
            # Same promotion as SearchVector: string configs become Value().
            if not hasattr(self.config, 'resolve_expression'):
                resolved.config = Value(self.config).resolve_expression(
                    query, allow_joins, reuse, summarize, for_save)
            else:
                resolved.config = self.config.resolve_expression(
                    query, allow_joins, reuse, summarize, for_save)
        return resolved

    def as_sql(self, compiler, connection):
        if self.config:
            config_sql, config_params = compiler.compile(self.config)
            template = 'plainto_tsquery({}::regconfig, %s)'.format(config_sql)
            params = config_params + [self.value]
        else:
            template = 'plainto_tsquery(%s)'
            params = [self.value]
        if self.invert:
            template = '!!({})'.format(template)
        return template, params

    def _combine(self, other, connector, reversed, node=None):
        combined = super(SearchQuery, self)._combine(other, connector, reversed, node)
        combined.output_field = SearchQueryField()
        return combined

    # On Combinable, these are not implemented to reduce confusion with Q. In
    # this case we are actually (ab)using them to do logical combination so
    # it's consistent with other usage in Django.
    def __or__(self, other):
        return self._combine(other, self.BITOR, False)

    def __ror__(self, other):
        return self._combine(other, self.BITOR, True)

    def __and__(self, other):
        return self._combine(other, self.BITAND, False)

    def __rand__(self, other):
        return self._combine(other, self.BITAND, True)

    def __invert__(self):
        extra = {
            'invert': not self.invert,
            'config': self.config,
        }
        return type(self)(self.value, **extra)


class SearchRank(Func):
    """Expression wrapping ``ts_rank(vector, query)`` with optional weights."""
    function = 'ts_rank'
    _output_field = FloatField()

    def __init__(self, vector, query, **extra):
        # Bare values are promoted to the matching expression types.
        if not hasattr(vector, 'resolve_expression'):
            vector = SearchVector(vector)
        if not hasattr(query, 'resolve_expression'):
            query = SearchQuery(query)
        weights = extra.get('weights')
        if weights is not None and not hasattr(weights, 'resolve_expression'):
            weights = Value(weights)
        self.weights = weights
        super(SearchRank, self).__init__(vector, query, **extra)

    def as_sql(self, compiler, connection, function=None, template=None):
        extra_params = []
        extra_context = {}
        if template is None and self.extra.get('weights'):
            if self.weights:
                template = '%(function)s(%(weights)s, %(expressions)s)'
                weight_sql, extra_params = compiler.compile(self.weights)
                extra_context['weights'] = weight_sql
        sql, params = super(SearchRank, self).as_sql(
            compiler, connection,
            function=function, template=template, **extra_context
        )
        return sql, extra_params + params


SearchVectorField.register_lookup(SearchVectorExact)


class TrigramBase(Func):
    """Common base for trigram functions taking (expression, string)."""

    def __init__(self, expression, string, **extra):
        if not hasattr(string, 'resolve_expression'):
            string = Value(string)
        super(TrigramBase, self).__init__(
            expression, string, output_field=FloatField(), **extra)


class TrigramSimilarity(TrigramBase):
    """``SIMILARITY(expression, string)``."""
    function = 'SIMILARITY'


class TrigramDistance(TrigramBase):
    """``expression <-> string`` (trigram distance operator)."""
    function = ''
    arg_joiner = ' <-> '
bsd-3-clause
aarchiba/numpy
numpy/lib/stride_tricks.py
57
6761
""" Utilities that manipulate strides to achieve desirable effects. An explanation of strides can be found in the "ndarray.rst" file in the NumPy reference guide. """ from __future__ import division, absolute_import, print_function import numpy as np __all__ = ['broadcast_to', 'broadcast_arrays'] class DummyArray(object): """Dummy object that just exists to hang __array_interface__ dictionaries and possibly keep alive a reference to a base array. """ def __init__(self, interface, base=None): self.__array_interface__ = interface self.base = base def _maybe_view_as_subclass(original_array, new_array): if type(original_array) is not type(new_array): # if input was an ndarray subclass and subclasses were OK, # then view the result as that subclass. new_array = new_array.view(type=type(original_array)) # Since we have done something akin to a view from original_array, we # should let the subclass finalize (if it has it implemented, i.e., is # not None). if new_array.__array_finalize__: new_array.__array_finalize__(original_array) return new_array def as_strided(x, shape=None, strides=None, subok=False): """ Make an ndarray from the given array with the given shape and strides. 
""" # first convert input to array, possibly keeping subclass x = np.array(x, copy=False, subok=subok) interface = dict(x.__array_interface__) if shape is not None: interface['shape'] = tuple(shape) if strides is not None: interface['strides'] = tuple(strides) array = np.asarray(DummyArray(interface, base=x)) if array.dtype.fields is None and x.dtype.fields is not None: # This should only happen if x.dtype is [('', 'Vx')] array.dtype = x.dtype return _maybe_view_as_subclass(x, array) def _broadcast_to(array, shape, subok, readonly): shape = tuple(shape) if np.iterable(shape) else (shape,) array = np.array(array, copy=False, subok=subok) if not shape and array.shape: raise ValueError('cannot broadcast a non-scalar to a scalar array') if any(size < 0 for size in shape): raise ValueError('all elements of broadcast shape must be non-' 'negative') broadcast = np.nditer( (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'], op_flags=['readonly'], itershape=shape, order='C').itviews[0] result = _maybe_view_as_subclass(array, broadcast) if not readonly and array.flags.writeable: result.flags.writeable = True return result def broadcast_to(array, shape, subok=False): """Broadcast an array to a new shape. Parameters ---------- array : array_like The array to broadcast. shape : tuple The shape of the desired array. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (default). Returns ------- broadcast : array A readonly view on the original array with the given shape. It is typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. Raises ------ ValueError If the array is not compatible with the new shape according to NumPy's broadcasting rules. Notes ----- .. 
versionadded:: 1.10.0 Examples -------- >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) """ return _broadcast_to(array, shape, subok=subok, readonly=True) def _broadcast_shape(*args): """Returns the shape of the ararys that would result from broadcasting the supplied arrays against each other. """ if not args: raise ValueError('must provide at least one argument') if len(args) == 1: # a single argument does not work with np.broadcast return np.asarray(args[0]).shape # use the old-iterator because np.nditer does not handle size 0 arrays # consistently b = np.broadcast(*args[:32]) # unfortunately, it cannot handle 32 or more arguments directly for pos in range(32, len(args), 31): # ironically, np.broadcast does not properly handle np.broadcast # objects (it treats them as scalars) # use broadcasting to avoid allocating the full array b = broadcast_to(0, b.shape) b = np.broadcast(b, *args[pos:(pos + 31)]) return b.shape def broadcast_arrays(*args, **kwargs): """ Broadcast any number of arrays against each other. Parameters ---------- `*args` : array_likes The arrays to broadcast. subok : bool, optional If True, then sub-classes will be passed-through, otherwise the returned arrays will be forced to be a base-class array (default). Returns ------- broadcasted : list of arrays These arrays are views on the original arrays. They are typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. If you need to write to the arrays, make copies first. Examples -------- >>> x = np.array([[1,2,3]]) >>> y = np.array([[1],[2],[3]]) >>> np.broadcast_arrays(x, y) [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])] Here is a useful idiom for getting contiguous copies instead of non-contiguous views. 
>>> [np.array(a) for a in np.broadcast_arrays(x, y)] [array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]), array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])] """ # nditer is not used here to avoid the limit of 32 arrays. # Otherwise, something like the following one-liner would suffice: # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews subok = kwargs.pop('subok', False) if kwargs: raise TypeError('broadcast_arrays() got an unexpected keyword ' 'argument {}'.format(kwargs.pop())) args = [np.array(_m, copy=False, subok=subok) for _m in args] shape = _broadcast_shape(*args) if all(array.shape == shape for array in args): # Common case where nothing needs to be broadcasted. return args # TODO: consider making the results of broadcast_arrays readonly to match # broadcast_to. This will require a deprecation cycle. return [_broadcast_to(array, shape, subok=subok, readonly=False) for array in args]
bsd-3-clause
AMOboxTV/AMOBox.LegoBuild
plugin.video.specto/resources/lib/resolvers/googledocs.py
23
2319
# -*- coding: utf-8 -*-

'''
    Specto Add-on
    Copyright (C) 2015 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''


import re
import json

from resources.lib.libraries import client


def resolve(url):
    """Turn a Google Docs/Drive preview URL into a list of stream dicts.

    Returns at most one entry per quality tier ('1080p', 'HD', 'SD'),
    best first, or None when nothing resolvable is found.
    """
    try:
        # Normalise the preview URL to the docs.google.com form.
        url = url.split('/preview', 1)[0]
        url = url.replace('drive.google.com', 'docs.google.com')

        page = client.request(url)
        fmt_map = re.compile('"fmt_stream_map",(".+?")').findall(page)[0]
        fmt_map = json.loads(fmt_map)

        # Each entry is "itag|stream_url"; keep the URL part and tag it.
        stream_urls = [entry.split('|')[-1] for entry in fmt_map.split(',')]
        streams = sum([tag(u) for u in stream_urls], [])

        # Pick the first stream of each quality, best quality first.
        picks = []
        for quality in ('1080p', 'HD', 'SD'):
            matches = [s for s in streams if s['quality'] == quality]
            if matches:
                picks.append(matches[0])

        if not picks:
            return
        return picks
    except:
        # Deliberate best-effort: resolver failures yield None, not a crash.
        return


def tag(url):
    """Map a stream URL's itag code to a quality label.

    Returns [{'quality': ..., 'url': url}] or [] when no code is found
    or the code is unknown.
    """
    codes = re.compile('itag=(\d*)').findall(url)
    codes += re.compile('=m(\d*)$').findall(url)
    if not codes:
        return []
    code = codes[0]

    if code in ('37', '137', '299', '96', '248', '303', '46'):
        return [{'quality': '1080p', 'url': url}]
    if code in ('22', '84', '136', '298', '120', '95', '247', '302', '45',
                '102'):
        return [{'quality': 'HD', 'url': url}]
    if code in ('35', '44', '135', '244', '94'):
        return [{'quality': 'SD', 'url': url}]
    if code in ('18', '34', '43', '82', '100', '101', '134', '243', '93'):
        return [{'quality': 'SD', 'url': url}]
    if code in ('5', '6', '36', '83', '133', '242', '92', '132'):
        return [{'quality': 'SD', 'url': url}]
    return []
gpl-2.0
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/pandas/tests/indexes/period/test_construction.py
6
19404
import pytest import numpy as np import pandas as pd import pandas.util.testing as tm import pandas.core.indexes.period as period from pandas.compat import lrange, PY3, text_type, lmap from pandas import (Period, PeriodIndex, period_range, offsets, date_range, Series, Index) class TestPeriodIndex(object): def setup_method(self, method): pass def test_construction_base_constructor(self): # GH 13664 arr = [pd.Period('2011-01', freq='M'), pd.NaT, pd.Period('2011-03', freq='M')] tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr))) arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')] tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr))) arr = [pd.Period('2011-01', freq='M'), pd.NaT, pd.Period('2011-03', freq='D')] tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object)) tm.assert_index_equal(pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)) def test_constructor_use_start_freq(self): # GH #1118 p = Period('4/2/2012', freq='B') index = PeriodIndex(start=p, periods=10) expected = PeriodIndex(start='4/2/2012', periods=10, freq='B') tm.assert_index_equal(index, expected) def test_constructor_field_arrays(self): # GH #1264 years = np.arange(1990, 2010).repeat(4)[2:-2] quarters = np.tile(np.arange(1, 5), 20)[2:-2] index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC') expected = period_range('1990Q3', '2009Q2', freq='Q-DEC') tm.assert_index_equal(index, expected) index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC') tm.assert_numpy_array_equal(index.asi8, index2.asi8) index = PeriodIndex(year=years, quarter=quarters) tm.assert_index_equal(index, expected) years = [2007, 2007, 2007] months = [1, 2] pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='M') pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='2M') 
pytest.raises(ValueError, PeriodIndex, year=years, month=months, freq='M', start=Period('2007-01', freq='M')) years = [2007, 2007, 2007] months = [1, 2, 3] idx = PeriodIndex(year=years, month=months, freq='M') exp = period_range('2007-01', periods=3, freq='M') tm.assert_index_equal(idx, exp) def test_constructor_U(self): # U was used as undefined period pytest.raises(ValueError, period_range, '2007-1-1', periods=500, freq='X') def test_constructor_nano(self): idx = period_range(start=Period(ordinal=1, freq='N'), end=Period(ordinal=4, freq='N'), freq='N') exp = PeriodIndex([Period(ordinal=1, freq='N'), Period(ordinal=2, freq='N'), Period(ordinal=3, freq='N'), Period(ordinal=4, freq='N')], freq='N') tm.assert_index_equal(idx, exp) def test_constructor_arrays_negative_year(self): years = np.arange(1960, 2000, dtype=np.int64).repeat(4) quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40) pindex = PeriodIndex(year=years, quarter=quarters) tm.assert_index_equal(pindex.year, pd.Index(years)) tm.assert_index_equal(pindex.quarter, pd.Index(quarters)) def test_constructor_invalid_quarters(self): pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004), quarter=lrange(4), freq='Q-DEC') def test_constructor_corner(self): pytest.raises(ValueError, PeriodIndex, periods=10, freq='A') start = Period('2007', freq='A-JUN') end = Period('2010', freq='A-DEC') pytest.raises(ValueError, PeriodIndex, start=start, end=end) pytest.raises(ValueError, PeriodIndex, start=start) pytest.raises(ValueError, PeriodIndex, end=end) result = period_range('2007-01', periods=10.5, freq='M') exp = period_range('2007-01', periods=10, freq='M') tm.assert_index_equal(result, exp) def test_constructor_fromarraylike(self): idx = period_range('2007-01', periods=20, freq='M') # values is an array of Period, thus can retrieve freq tm.assert_index_equal(PeriodIndex(idx.values), idx) tm.assert_index_equal(PeriodIndex(list(idx.values)), idx) pytest.raises(ValueError, PeriodIndex, idx._values) 
pytest.raises(ValueError, PeriodIndex, list(idx._values)) pytest.raises(TypeError, PeriodIndex, data=Period('2007', freq='A')) result = PeriodIndex(iter(idx)) tm.assert_index_equal(result, idx) result = PeriodIndex(idx) tm.assert_index_equal(result, idx) result = PeriodIndex(idx, freq='M') tm.assert_index_equal(result, idx) result = PeriodIndex(idx, freq=offsets.MonthEnd()) tm.assert_index_equal(result, idx) assert result.freq, 'M' result = PeriodIndex(idx, freq='2M') tm.assert_index_equal(result, idx.asfreq('2M')) assert result.freq, '2M' result = PeriodIndex(idx, freq=offsets.MonthEnd(2)) tm.assert_index_equal(result, idx.asfreq('2M')) assert result.freq, '2M' result = PeriodIndex(idx, freq='D') exp = idx.asfreq('D', 'e') tm.assert_index_equal(result, exp) def test_constructor_datetime64arr(self): vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64) vals = vals.view(np.dtype('M8[us]')) pytest.raises(ValueError, PeriodIndex, vals, freq='D') def test_constructor_dtype(self): # passing a dtype with a tz should localize idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]') exp = PeriodIndex(['2013-01', '2013-03'], freq='M') tm.assert_index_equal(idx, exp) assert idx.dtype == 'period[M]' idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]') exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D') tm.assert_index_equal(idx, exp) assert idx.dtype == 'period[3D]' # if we already have a freq and its not the same, then asfreq # (not changed) idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D') res = PeriodIndex(idx, dtype='period[M]') exp = PeriodIndex(['2013-01', '2013-01'], freq='M') tm.assert_index_equal(res, exp) assert res.dtype == 'period[M]' res = PeriodIndex(idx, freq='M') tm.assert_index_equal(res, exp) assert res.dtype == 'period[M]' msg = 'specified freq and dtype are different' with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(['2011-01'], freq='M', dtype='period[D]') def 
test_constructor_empty(self): idx = pd.PeriodIndex([], freq='M') assert isinstance(idx, PeriodIndex) assert len(idx) == 0 assert idx.freq == 'M' with tm.assert_raises_regex(ValueError, 'freq not specified'): pd.PeriodIndex([]) def test_constructor_pi_nat(self): idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='M')])) tm.assert_index_equal(idx, exp) idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='M')]) exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(np.array([pd.NaT, pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='M')])) tm.assert_index_equal(idx, exp) idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex([pd.NaT, pd.NaT]) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex(np.array([pd.NaT, pd.NaT])) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex(['NaT', 'NaT']) with tm.assert_raises_regex(ValueError, 'freq not specified'): PeriodIndex(np.array(['NaT', 'NaT'])) def test_constructor_incompat_freq(self): msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)" with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='D')]) with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT, Period('2011-01', freq='D')])) # first element is pd.NaT with tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex([pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='D')]) with 
tm.assert_raises_regex(period.IncompatibleFrequency, msg): PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'), Period('2011-01', freq='D')])) def test_constructor_mixed(self): idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')]) exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M') tm.assert_index_equal(idx, exp) idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT, '2012-01-01']) exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D') tm.assert_index_equal(idx, exp) def test_constructor_simple_new(self): idx = period_range('2007-01', name='p', periods=2, freq='M') result = idx._simple_new(idx, 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new([pd.Period('2007-01', freq='M'), pd.Period('2007-02', freq='M')], 'p', freq=idx.freq) tm.assert_index_equal(result, idx) result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'), pd.Period('2007-02', freq='M')]), 'p', freq=idx.freq) tm.assert_index_equal(result, idx) def test_constructor_simple_new_empty(self): # GH13079 idx = PeriodIndex([], freq='M', name='p') result = idx._simple_new(idx, name='p', freq='M') tm.assert_index_equal(result, idx) def test_constructor_floats(self): # GH13079 for floats in [[1.1, 2.1], np.array([1.1, 2.1])]: with pytest.raises(TypeError): pd.PeriodIndex._simple_new(floats, freq='M') with pytest.raises(TypeError): pd.PeriodIndex(floats, freq='M') def test_constructor_nat(self): pytest.raises(ValueError, period_range, start='NaT', end='2011-01-01', freq='M') pytest.raises(ValueError, period_range, start='2011-01-01', end='NaT', freq='M') def test_constructor_year_and_quarter(self): year = pd.Series([2001, 2002, 2003]) quarter = year - 2000 idx = PeriodIndex(year=year, 
quarter=quarter) strs = ['%dQ%d' % t for t in zip(quarter, year)] lops = list(map(Period, strs)) p = PeriodIndex(lops) tm.assert_index_equal(p, idx) def test_constructor_freq_mult(self): # GH #7811 for func in [PeriodIndex, period_range]: # must be the same, but for sure... pidx = func(start='2014-01', freq='2M', periods=4) expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='2M') tm.assert_index_equal(pidx, expected) pidx = func(start='2014-01-02', end='2014-01-15', freq='3D') expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11', '2014-01-14'], freq='3D') tm.assert_index_equal(pidx, expected) pidx = func(end='2014-01-01 17:00', freq='4H', periods=3) expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00', '2014-01-01 17:00'], freq='4H') tm.assert_index_equal(pidx, expected) msg = ('Frequency must be positive, because it' ' represents span: -1M') with tm.assert_raises_regex(ValueError, msg): PeriodIndex(['2011-01'], freq='-1M') msg = ('Frequency must be positive, because it' ' represents span: 0M') with tm.assert_raises_regex(ValueError, msg): PeriodIndex(['2011-01'], freq='0M') msg = ('Frequency must be positive, because it' ' represents span: 0M') with tm.assert_raises_regex(ValueError, msg): period_range('2011-01', periods=3, freq='0M') def test_constructor_freq_mult_dti_compat(self): import itertools mults = [1, 2, 3, 4, 5] freqs = ['A', 'M', 'D', 'T', 'S'] for mult, freq in itertools.product(mults, freqs): freqstr = str(mult) + freq pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10) expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freqstr) tm.assert_index_equal(pidx, expected) def test_constructor_freq_combined(self): for freq in ['1D1H', '1H1D']: pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq) expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'], freq='25H') for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]): pidx = 
func(start='2016-01-01', periods=2, freq=freq) expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'], freq='25H') tm.assert_index_equal(pidx, expected) def test_constructor(self): pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') assert len(pi) == 9 pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009') assert len(pi) == 4 * 9 pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009') assert len(pi) == 12 * 9 pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009') assert len(pi) == 365 * 9 + 2 pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009') assert len(pi) == 261 * 9 pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00') assert len(pi) == 365 * 24 pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59') assert len(pi) == 24 * 60 pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59') assert len(pi) == 24 * 60 * 60 start = Period('02-Apr-2005', 'B') i1 = PeriodIndex(start=start, periods=20) assert len(i1) == 20 assert i1.freq == start.freq assert i1[0] == start end_intv = Period('2006-12-31', 'W') i1 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == 10 assert i1.freq == end_intv.freq assert i1[-1] == end_intv end_intv = Period('2006-12-31', '1w') i2 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == len(i2) assert (i1 == i2).all() assert i1.freq == i2.freq end_intv = Period('2006-12-31', ('w', 1)) i2 = PeriodIndex(end=end_intv, periods=10) assert len(i1) == len(i2) assert (i1 == i2).all() assert i1.freq == i2.freq end_intv = Period('2005-05-01', 'B') i1 = PeriodIndex(start=start, end=end_intv) # infer freq from first element i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) assert len(i2) == 2 assert i2[0] == end_intv i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')])) assert len(i2) == 2 assert i2[0] == end_intv # Mixed freq should fail vals = [end_intv, Period('2006-12-31', 'w')] pytest.raises(ValueError, PeriodIndex, vals) vals = 
np.array(vals) pytest.raises(ValueError, PeriodIndex, vals) def test_constructor_error(self): start = Period('02-Apr-2005', 'B') end_intv = Period('2006-12-31', ('w', 1)) msg = 'Start and end must have same freq' with tm.assert_raises_regex(ValueError, msg): PeriodIndex(start=start, end=end_intv) msg = 'Must specify 2 of start, end, periods' with tm.assert_raises_regex(ValueError, msg): PeriodIndex(start=start) def test_recreate_from_data(self): for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']: org = PeriodIndex(start='2001/04/01', freq=o, periods=1) idx = PeriodIndex(org.values, freq=o) tm.assert_index_equal(idx, org) def test_map_with_string_constructor(self): raw = [2005, 2007, 2009] index = PeriodIndex(raw, freq='A') types = str, if PY3: # unicode types += text_type, for t in types: expected = Index(lmap(t, raw)) res = index.map(t) # should return an Index assert isinstance(res, Index) # preserve element types assert all(isinstance(resi, t) for resi in res) # lastly, values should compare equal tm.assert_index_equal(res, expected) class TestSeriesPeriod(object): def setup_method(self, method): self.series = Series(period_range('2000-01-01', periods=10, freq='D')) def test_constructor_cant_cast_period(self): with pytest.raises(TypeError): Series(period_range('2000-01-01', periods=10, freq='D'), dtype=float) def test_constructor_cast_object(self): s = Series(period_range('1/1/2000', periods=10), dtype=object) exp = Series(period_range('1/1/2000', periods=10)) tm.assert_series_equal(s, exp)
mit
wavelets/zulip
zerver/lib/cache.py
115
11742
from __future__ import absolute_import from functools import wraps from django.core.cache import cache as djcache from django.core.cache import get_cache from django.conf import settings from django.db.models import Q from zerver.lib.utils import statsd, statsd_key, make_safe_digest import time import base64 import random import sys import os import os.path import hashlib memcached_time_start = 0 memcached_total_time = 0 memcached_total_requests = 0 def get_memcached_time(): return memcached_total_time def get_memcached_requests(): return memcached_total_requests def memcached_stats_start(): global memcached_time_start memcached_time_start = time.time() def memcached_stats_finish(): global memcached_total_time global memcached_total_requests global memcached_time_start memcached_total_requests += 1 memcached_total_time += (time.time() - memcached_time_start) def get_or_create_key_prefix(): if settings.TEST_SUITE: # This sets the prefix mostly for the benefit of the JS tests. # The Python tests overwrite KEY_PREFIX on each test. 
return 'test_suite:' + str(os.getpid()) + ':' filename = os.path.join(settings.DEPLOY_ROOT, "memcached_prefix") try: fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0444) prefix = base64.b16encode(hashlib.sha256(str(random.getrandbits(256))).digest())[:32].lower() + ':' # This does close the underlying file with os.fdopen(fd, 'w') as f: f.write(prefix + "\n") except OSError: # The file already exists tries = 1 while tries < 10: with file(filename, 'r') as f: prefix = f.readline()[:-1] if len(prefix) == 33: break tries += 1 prefix = '' time.sleep(0.5) if not prefix: sys.exit("Could not read memcache key prefix file") return prefix KEY_PREFIX = get_or_create_key_prefix() def bounce_key_prefix_for_testing(test_name): global KEY_PREFIX KEY_PREFIX = test_name + ':' + str(os.getpid()) + ':' def get_cache_backend(cache_name): if cache_name is None: return djcache return get_cache(cache_name) def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None): """Decorator which applies Django caching to a function. Decorator argument is a function which computes a cache key from the original function's arguments. You are responsible for avoiding collisions with other uses of this decorator or other uses of caching.""" def decorator(func): @wraps(func) def func_with_caching(*args, **kwargs): key = keyfunc(*args, **kwargs) val = cache_get(key, cache_name=cache_name) extra = "" if cache_name == 'database': extra = ".dbcache" if with_statsd_key is not None: metric_key = with_statsd_key else: metric_key = statsd_key(key) status = "hit" if val is not None else "miss" statsd.incr("cache%s.%s.%s" % (extra, metric_key, status)) # Values are singleton tuples so that we can distinguish # a result of None from a missing key. 
if val is not None: return val[0] val = func(*args, **kwargs) cache_set(key, val, cache_name=cache_name, timeout=timeout) return val return func_with_caching return decorator def cache_set(key, val, cache_name=None, timeout=None): memcached_stats_start() cache_backend = get_cache_backend(cache_name) ret = cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout) memcached_stats_finish() return ret def cache_get(key, cache_name=None): memcached_stats_start() cache_backend = get_cache_backend(cache_name) ret = cache_backend.get(KEY_PREFIX + key) memcached_stats_finish() return ret def cache_get_many(keys, cache_name=None): keys = [KEY_PREFIX + key for key in keys] memcached_stats_start() ret = get_cache_backend(cache_name).get_many(keys) memcached_stats_finish() return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()]) def cache_set_many(items, cache_name=None, timeout=None): new_items = {} for key in items: new_items[KEY_PREFIX + key] = items[key] items = new_items memcached_stats_start() ret = get_cache_backend(cache_name).set_many(items, timeout=timeout) memcached_stats_finish() return ret def cache_delete(key, cache_name=None): memcached_stats_start() get_cache_backend(cache_name).delete(KEY_PREFIX + key) memcached_stats_finish() def cache_delete_many(items, cache_name=None): memcached_stats_start() get_cache_backend(cache_name).delete_many( KEY_PREFIX + item for item in items) memcached_stats_finish() # Required Arguments are as follows: # * object_ids: The list of object ids to look up # * cache_key_function: object_id => cache key # * query_function: [object_ids] => [objects from database] # Optional keyword arguments: # * setter: Function to call before storing items to cache (e.g. compression) # * extractor: Function to call on items returned from cache # (e.g. decompression). Should be the inverse of the setter # function. 
# * id_fetcher: Function mapping an object from database => object_id # (in case we're using a key more complex than obj.id) # * cache_transformer: Function mapping an object from database => # value for cache (in case the values that we're caching are some # function of the objects, not the objects themselves) def generic_bulk_cached_fetch(cache_key_function, query_function, object_ids, extractor=lambda obj: obj, setter=lambda obj: obj, id_fetcher=lambda obj: obj.id, cache_transformer=lambda obj: obj): cache_keys = {} for object_id in object_ids: cache_keys[object_id] = cache_key_function(object_id) cached_objects = cache_get_many([cache_keys[object_id] for object_id in object_ids]) for (key, val) in cached_objects.items(): cached_objects[key] = extractor(cached_objects[key][0]) needed_ids = [object_id for object_id in object_ids if cache_keys[object_id] not in cached_objects] db_objects = query_function(needed_ids) items_for_memcached = {} for obj in db_objects: key = cache_keys[id_fetcher(obj)] item = cache_transformer(obj) items_for_memcached[key] = (setter(item),) cached_objects[key] = item if len(items_for_memcached) > 0: cache_set_many(items_for_memcached) return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids if cache_keys[object_id] in cached_objects) def cache(func): """Decorator which applies Django caching to a function. 
Uses a key based on the function's name, filename, and the repr() of its arguments.""" func_uniqifier = '%s-%s' % (func.func_code.co_filename, func.func_name) @wraps(func) def keyfunc(*args, **kwargs): # Django complains about spaces because memcached rejects them key = func_uniqifier + repr((args, kwargs)) return key.replace('-','--').replace(' ','-s') return cache_with_key(keyfunc)(func) def message_cache_key(message_id): return "message:%d" % (message_id,) def display_recipient_cache_key(recipient_id): return "display_recipient_dict:%d" % (recipient_id,) def user_profile_by_email_cache_key(email): # See the comment in zerver/lib/avatar.py:gravatar_hash for why we # are proactively encoding email addresses even though they will # with high likelihood be ASCII-only for the foreseeable future. return 'user_profile_by_email:%s' % (make_safe_digest(email.strip()),) def user_profile_by_id_cache_key(user_profile_id): return "user_profile_by_id:%s" % (user_profile_id,) def cache_save_user_profile(user_profile): cache_set(user_profile_by_id_cache_key(user_profile.id), user_profile, timeout=3600*24*7) def active_user_dicts_in_realm_cache_key(realm): return "active_user_dicts_in_realm:%s" % (realm.id,) def active_bot_dicts_in_realm_cache_key(realm): return "active_bot_dicts_in_realm:%s" % (realm.id,) def get_stream_cache_key(stream_name, realm): from zerver.models import Realm if isinstance(realm, Realm): realm_id = realm.id else: realm_id = realm return "stream_by_realm_and_name:%s:%s" % ( realm_id, make_safe_digest(stream_name.strip().lower())) def update_user_profile_caches(user_profiles): items_for_memcached = {} for user_profile in user_profiles: items_for_memcached[user_profile_by_email_cache_key(user_profile.email)] = (user_profile,) items_for_memcached[user_profile_by_id_cache_key(user_profile.id)] = (user_profile,) cache_set_many(items_for_memcached) # Called by models.py to flush the user_profile cache whenever we save # a user_profile object def 
flush_user_profile(sender, **kwargs): user_profile = kwargs['instance'] update_user_profile_caches([user_profile]) # Invalidate our active_users_in_realm info dict if any user has changed # name or email if kwargs['update_fields'] is None or \ len(set(['full_name', 'short_name', 'email', 'is_active']) & set(kwargs['update_fields'])) > 0: cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm)) # Invalidate our active_bots_in_realm info dict if any bot has changed bot_fields = {'full_name', 'api_key', 'avatar_source', 'default_all_public_streams', 'is_active', 'default_sending_stream', 'default_events_register_stream'} if user_profile.is_bot and (kwargs['update_fields'] is None or bot_fields & set(kwargs['update_fields'])): cache_delete(active_bot_dicts_in_realm_cache_key(user_profile.realm)) # Invalidate realm-wide alert words cache if any user in the realm has changed # alert words if kwargs['update_fields'] is None or "alert_words" in kwargs['update_fields']: cache_delete(realm_alert_words_cache_key(user_profile.realm)) # Called by models.py to flush various caches whenever we save # a Realm object. The main tricky thing here is that Realm info is # generally cached indirectly through user_profile objects. def flush_realm(sender, **kwargs): realm = kwargs['instance'] users = realm.get_active_users() update_user_profile_caches(users) if realm.deactivated: cache_delete(active_user_dicts_in_realm_cache_key(realm)) cache_delete(active_bot_dicts_in_realm_cache_key(realm)) cache_delete(realm_alert_words_cache_key(realm)) def realm_alert_words_cache_key(realm): return "realm_alert_words:%s" % (realm.domain,) # Called by models.py to flush the stream cache whenever we save a stream # object. 
def flush_stream(sender, **kwargs):
    """Refresh the cached Stream and, when needed, the realm's bot dict cache.

    Connected as a post-save handler: ``kwargs['instance']`` is the Stream
    that was just saved and ``kwargs['update_fields']`` is either None
    (unknown set of changed fields) or the iterable passed to ``save()``.
    """
    from zerver.models import UserProfile
    stream = kwargs['instance']
    items_for_memcached = {}
    items_for_memcached[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
    cache_set_many(items_for_memcached)

    # The active_bot_dicts cache embeds default-stream names, so it must be
    # invalidated when this stream may have been renamed AND some bot in the
    # realm uses it as a default stream.
    #
    # BUGFIX: the original condition read `A or B and C`, which Python parses
    # as `A or (B and C)` because `and` binds tighter than `or`.  The intended
    # grouping is `(A or B) and C`; without the parentheses, an unknown
    # update_fields (None) skipped the UserProfile existence check entirely
    # and invalidated the cache on every save.
    if (kwargs['update_fields'] is None or 'name' in kwargs['update_fields']) and \
        UserProfile.objects.filter(
            Q(default_sending_stream=stream) |
            Q(default_events_register_stream=stream)
        ).exists():
        cache_delete(active_bot_dicts_in_realm_cache_key(stream.realm))
apache-2.0
direvus/ansible
lib/ansible/modules/cloud/amazon/ec2_placement_group.py
53
6407
#!/usr/bin/python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ec2_placement_group short_description: Create or delete an EC2 Placement Group description: - Create an EC2 Placement Group; if the placement group already exists, nothing is done. Or, delete an existing placement group. If the placement group is absent, do nothing. See also http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html version_added: "2.5" author: "Brad Macpherson (@iiibrad)" options: name: description: - The name for the placement group. required: true state: description: - Create or delete placement group. required: false default: present choices: [ 'present', 'absent' ] strategy: description: - Placement group strategy. Cluster will cluster instances into a low-latency group in a single Availability Zone, while Spread spreads instances across underlying hardware. required: false default: cluster choices: [ 'cluster', 'spread' ] extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide # for details. # Create a placement group. - ec2_placement_group: name: my-cluster state: present # Create a Spread placement group. - ec2_placement_group: name: my-cluster state: present strategy: spread # Delete a placement group. 
- ec2_placement_group: name: my-cluster state: absent ''' RETURN = ''' placement_group: description: Placement group attributes returned: when state != absent type: complex contains: name: description: PG name type: string sample: my-cluster state: description: PG state type: string sample: "available" strategy: description: PG strategy type: string sample: "cluster" ''' from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.ec2 import (AWSRetry, boto3_conn, ec2_argument_spec, get_aws_connection_info) try: from botocore.exceptions import (BotoCoreError, ClientError) except ImportError: pass # caught by AnsibleAWSModule @AWSRetry.exponential_backoff() def get_placement_group_details(connection, module): name = module.params.get("name") try: response = connection.describe_placement_groups( Filters=[{ "Name": "group-name", "Values": [name] }]) except (BotoCoreError, ClientError) as e: module.fail_json_aws( e, msg="Couldn't find placement group named [%s]" % name) if len(response['PlacementGroups']) != 1: return None else: placement_group = response['PlacementGroups'][0] return { "name": placement_group['GroupName'], "state": placement_group['State'], "strategy": placement_group['Strategy'], } @AWSRetry.exponential_backoff() def create_placement_group(connection, module): name = module.params.get("name") strategy = module.params.get("strategy") try: connection.create_placement_group( GroupName=name, Strategy=strategy, DryRun=module.check_mode) except (BotoCoreError, ClientError) as e: if e.response['Error']['Code'] == "DryRunOperation": module.exit_json(changed=True, placement_group={ "name": name, "state": 'DryRun', "strategy": strategy, }) module.fail_json_aws( e, msg="Couldn't create placement group [%s]" % name) module.exit_json(changed=True, placement_group=get_placement_group_details( connection, module )) @AWSRetry.exponential_backoff() def delete_placement_group(connection, module): name = module.params.get("name") try: 
connection.delete_placement_group( GroupName=name, DryRun=module.check_mode) except (BotoCoreError, ClientError) as e: module.fail_json_aws( e, msg="Couldn't delete placement group [%s]" % name) module.exit_json(changed=True) def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( name=dict(type='str'), state=dict(default='present', choices=['present', 'absent']), strategy=dict(default='cluster', choices=['cluster', 'spread']) ) ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True ) region, ec2_url, aws_connect_params = get_aws_connection_info( module, boto3=True) connection = boto3_conn(module, resource='ec2', conn_type='client', region=region, endpoint=ec2_url, **aws_connect_params) state = module.params.get("state") if state == 'present': placement_group = get_placement_group_details(connection, module) if placement_group is None: create_placement_group(connection, module) else: strategy = module.params.get("strategy") if placement_group['strategy'] == strategy: module.exit_json( changed=False, placement_group=placement_group) else: name = module.params.get("name") module.fail_json( msg=("Placement group '{}' exists, can't change strategy" + " from '{}' to '{}'").format( name, placement_group['strategy'], strategy)) elif state == 'absent': placement_group = get_placement_group_details(connection, module) if placement_group is None: module.exit_json(changed=False) else: delete_placement_group(connection, module) if __name__ == '__main__': main()
gpl-3.0
2014c2g3/w16b_test
static/Brython3.1.3-20150514-095342/Lib/operator.py
674
7736
#!/usr/bin/env python3
"""
Operator Interface

This module exports a set of functions corresponding to the intrinsic
operators of Python.  For example, operator.add(x, y) is equivalent
to the expression x+y.  The function names are those used for special
methods; variants without leading and trailing '__' are also provided
for convenience.

This is the pure Python implementation of the module.
"""
# downloaded from http://bugs.python.org/file28327/operator.py

#import builtins as _bi  #there is no builtins module


def lt(a, b):
    "Same as a < b."
    return a < b
__lt__ = lt

def le(a, b):
    "Same as a <= b."
    return a <= b
__le__ = le

def eq(a, b):
    "Same as a == b."
    return a == b
__eq__ = eq

def ne(a, b):
    "Same as a != b."
    return a != b
__ne__ = ne

def ge(a, b):
    "Same as a >= b."
    return a >= b
__ge__ = ge

def gt(a, b):
    "Same as a > b."
    return a > b
__gt__ = gt

def not_(a):
    "Same as not a."
    return not a
__not__ = not_

def truth(a):
    "Return True if a is true, False otherwise."
    return bool(a)

def is_(a, b):
    "Same as a is b."
    return a is b

# brython does not like (causes syntax error)
#def is_not(a, b):
#    "Same as a is not b."
#    return a is not b

# Delegate to the builtin; a pure-Python re-implementation recursed.
__abs__ = abs
abs = abs

def add(a, b):
    "Same as a + b."
    return a + b
__add__ = add

def and_(a, b):
    "Same as a & b."
    return a & b
__and__ = and_

def floordiv(a, b):
    "Same as a // b."
    return a // b
__floordiv__ = floordiv

def index(a):
    "Same as a.__index__()."
    return a.__index__()
__index__ = index

def inv(a):
    "Same as ~a."
    return ~a
invert = __inv__ = __invert__ = inv

def lshift(a, b):
    "Same as a << b."
    return a << b
__lshift__ = lshift

def mod(a, b):
    "Same as a % b."
    return a % b
__mod__ = mod

def mul(a, b):
    "Same as a * b."
    return a * b
__mul__ = mul

def neg(a):
    "Same as -a."
    return -a
__neg__ = neg

def or_(a, b):
    "Same as a | b."
    return a | b
__or__ = or_

def pos(a):
    "Same as +a."
    # CLEANUP: removed the unreachable fallback (`if a >= 0: ...`) that
    # followed this return in the original.
    return +a
__pos__ = pos

def pow(a, b):
    "Same as a ** b."
    return a ** b
__pow__ = pow

def rshift(a, b):
    "Same as a >> b."
    return a >> b
__rshift__ = rshift

def sub(a, b):
    "Same as a - b."
    return a - b
__sub__ = sub

def truediv(a, b):
    "Same as a / b."
    return a / b
__truediv__ = truediv

def xor(a, b):
    "Same as a ^ b."
    return a ^ b
__xor__ = xor

def concat(a, b):
    "Same as a + b, for a and b sequences."
    if not (hasattr(a, '__getitem__') and hasattr(b, '__getitem__')):
        raise TypeError('a and b must be sequences')
    return a + b
__concat__ = concat

def contains(a, b):
    "Same as b in a (note reversed operands)."
    return b in a
__contains__ = contains

def countOf(a, b):
    "Return the number of times b occurs in a."
    count = 0
    for i in a:
        if i == b:
            count += 1
    return count

def delitem(a, b):
    "Same as del a[b]."
    del a[b]
__delitem__ = delitem

def getitem(a, b):
    "Same as a[b]."
    return a[b]
__getitem__ = getitem

def indexOf(a, b):
    "Return the first index of b in a."
    for i, j in enumerate(a):
        if j == b:
            return i
    else:
        raise ValueError('b not found in a')

def setitem(a, b, c):
    "Same as a[b] = c."
    a[b] = c
__setitem__ = setitem


class attrgetter:
    """
    Return a callable object that fetches the given attribute(s) from its
    operand.  After f=attrgetter('name'), the call f(r) returns r.name.
    After g=attrgetter('name', 'date'), the call g(r) returns (r.name,
    r.date).  After h=attrgetter('name.first', 'name.last'), the call h(r)
    returns (r.name.first, r.name.last).
    """
    def __init__(self, attr, *attrs):
        self._attrs = (attr,)
        self._attrs += attrs
        if any(not isinstance(attr, str) for attr in self._attrs):
            raise TypeError('attribute name must be a string')

    @staticmethod
    def _resolve_attr(obj, attr):
        # Dotted names resolve one attribute at a time.
        for name in attr.split('.'):
            obj = getattr(obj, name)
        return obj

    def __call__(self, obj):
        if len(self._attrs) == 1:
            return self._resolve_attr(obj, self._attrs[0])
        return tuple(self._resolve_attr(obj, attr) for attr in self._attrs)


class itemgetter:
    """
    Return a callable object that fetches the given item(s) from its operand.
    After f=itemgetter(2), the call f(r) returns r[2].
    After g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])
    """
    def __init__(self, item, *items):
        self._items = (item,)
        self._items += items

    def __call__(self, obj):
        if len(self._items) == 1:
            return obj[self._items[0]]
        return tuple(obj[item] for item in self._items)


class methodcaller:
    """
    Return a callable object that calls the given method on its operand.
    After f = methodcaller('name'), the call f(r) returns r.name().
    After g = methodcaller('name', 'date', foo=1), the call g(r) returns
    r.name('date', foo=1).
    """
    def __init__(self, name, *args, **kwargs):
        self._name = name
        self._args = args
        self._kwargs = kwargs

    def __call__(self, obj):
        return getattr(obj, self._name)(*self._args, **self._kwargs)


def iadd(a, b):
    "Same as a += b."
    a += b
    return a
__iadd__ = iadd

def iand(a, b):
    "Same as a &= b."
    a &= b
    return a
__iand__ = iand

def iconcat(a, b):
    "Same as a += b, for a and b sequences."
    if not (hasattr(a, '__getitem__') and hasattr(b, '__getitem__')):
        raise TypeError('a and b must be sequences')
    a += b
    return a
__iconcat__ = iconcat

def ifloordiv(a, b):
    "Same as a //= b."
    a //= b
    return a
__ifloordiv__ = ifloordiv

def ilshift(a, b):
    "Same as a <<= b."
    a <<= b
    return a
__ilshift__ = ilshift

def imod(a, b):
    "Same as a %= b."
    a %= b
    return a
__imod__ = imod

def imul(a, b):
    "Same as a *= b."
    a *= b
    return a
__imul__ = imul

def ior(a, b):
    "Same as a |= b."
    a |= b
    return a
__ior__ = ior

def ipow(a, b):
    "Same as a **= b."
    a **= b
    return a
__ipow__ = ipow

def irshift(a, b):
    "Same as a >>= b."
    a >>= b
    return a
__irshift__ = irshift

def isub(a, b):
    "Same as a -= b."
    a -= b
    return a
__isub__ = isub

def itruediv(a, b):
    "Same as a /= b."
    a /= b
    return a
__itruediv__ = itruediv

def ixor(a, b):
    "Same as a ^= b."
    a ^= b
    return a
__ixor__ = ixor


def length_hint(obj, default=0):
    """
    Return an estimate of the number of items in obj.
    This is useful for presizing containers when building from an
    iterable.

    If the object supports len(), the result will be exact.
    Otherwise, it may over- or under-estimate by an arbitrary amount. The
    result will be an integer >= 0.
    """
    try:
        return len(obj)
    except TypeError:
        try:
            val = obj.__length_hint__()
            if val is NotImplemented:
                raise TypeError
        except (AttributeError, TypeError):
            return default
        else:
            # BUGFIX: the original rejected a hint of 0 (`if not val > 0`)
            # and raised with a message about `default`.  Per the docstring,
            # any integer >= 0 is a valid hint; only a negative value from
            # __length_hint__ is an error.
            if not isinstance(val, int):
                raise TypeError('__length_hint__ must be an integer, not %s'
                                % type(val).__name__)
            if val < 0:
                raise ValueError('__length_hint__() should return >= 0')
            return val

#try:
#    from _operator import *
#    from _operator import __doc__
#except ImportError:
#    pass
agpl-3.0
hwjworld/xiaodun-platform
cms/djangoapps/contentstore/features/course-export.py
10
1659
# disable missing docstring
#pylint: disable=C0111

from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal


@step('I export the course$')
def i_export_the_course(step):
    # Navigate Tools -> Export, then trigger the export action.
    world.click_tools()
    link_css = 'li.nav-course-tools-export a'
    world.css_click(link_css)
    world.css_click('a.action-export')


@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
    # <verbatim> is not valid capa problem XML, so the export should flag it.
    enter_xml_in_advanced_problem(step,
        """<problem><h1>Smallest Canvas</h1>
            <p>You want to make the smallest canvas you can.</p>
            <multiplechoiceresponse>
              <choicegroup type="MultipleChoice">
                <choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
                <choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
              </choicegroup>
            </multiplechoiceresponse>
        </problem>"""
    )


@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
    # BUGFIX: this function previously reused the name `i_enter_bad_xml`,
    # silently shadowing the handler above at module level.  The @step
    # registration still worked, but the duplicate name is a latent defect
    # (pylint W0621/redefinition) and breaks introspection.  Renamed; the
    # step pattern callers match against is unchanged.
    enter_xml_in_advanced_problem(step, "<problem>&</problem>")


@step('I get an error dialog$')
def get_an_error_dialog(step):
    assert_true(world.is_css_present("div.prompt.error"))


@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
    world.click_link_by_text('Correct failed component')
    assert_true(world.css_html("span.inline-error").startswith("Problem i4x://MITx/999/problem"))
    assert_equal(1, world.browser.url.count("unit/MITx.999.Robot_Super_Course/branch/draft/block/vertical"))
agpl-3.0
att-comdev/drydock
drydock_provisioner/orchestrator/validations/hostname_validity.py
1
1295
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from drydock_provisioner.orchestrator.validations.validators import Validators


class HostnameValidity(Validators):
    """Site-design validator (DD3003) flagging hostnames with '__'."""

    def __init__(self):
        super().__init__('Hostname Validity', 'DD3003')

    def run_validation(self, site_design, orchestrator=None):
        """Report an error for every baremetal node whose hostname
        contains a double underscore ('__')."""
        for node in site_design.baremetal_nodes or []:
            if '__' not in node.name:
                continue
            msg = "Hostname %s invalid." % node.name
            self.report_error(
                msg, [node.doc_ref],
                "Hostnames cannot contain '__' (double underscore)")
        return
apache-2.0
CyanogenMod/android_external_chromium-trace
trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/_stream_hybi.py
29
30984
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file provides classes and helper functions for parsing/building frames of the WebSocket protocol (RFC 6455). 
Specification: http://tools.ietf.org/html/rfc6455 """ from collections import deque import logging import os import struct import time from mod_pywebsocket import common from mod_pywebsocket import util from mod_pywebsocket._stream_base import BadOperationException from mod_pywebsocket._stream_base import ConnectionTerminatedException from mod_pywebsocket._stream_base import InvalidFrameException from mod_pywebsocket._stream_base import InvalidUTF8Exception from mod_pywebsocket._stream_base import StreamBase from mod_pywebsocket._stream_base import UnsupportedFrameException _NOOP_MASKER = util.NoopMasker() class Frame(object): def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0, opcode=None, payload=''): self.fin = fin self.rsv1 = rsv1 self.rsv2 = rsv2 self.rsv3 = rsv3 self.opcode = opcode self.payload = payload # Helper functions made public to be used for writing unittests for WebSocket # clients. def create_length_header(length, mask): """Creates a length header. Args: length: Frame length. Must be less than 2^63. mask: Mask bit. Must be boolean. Raises: ValueError: when bad data is given. """ if mask: mask_bit = 1 << 7 else: mask_bit = 0 if length < 0: raise ValueError('length must be non negative integer') elif length <= 125: return chr(mask_bit | length) elif length < (1 << 16): return chr(mask_bit | 126) + struct.pack('!H', length) elif length < (1 << 63): return chr(mask_bit | 127) + struct.pack('!Q', length) else: raise ValueError('Payload is too big for one frame') def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask): """Creates a frame header. Raises: Exception: when bad data is given. 
""" if opcode < 0 or 0xf < opcode: raise ValueError('Opcode out of range') if payload_length < 0 or (1 << 63) <= payload_length: raise ValueError('payload_length out of range') if (fin | rsv1 | rsv2 | rsv3) & ~1: raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1') header = '' first_byte = ((fin << 7) | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) | opcode) header += chr(first_byte) header += create_length_header(payload_length, mask) return header def _build_frame(header, body, mask): if not mask: return header + body masking_nonce = os.urandom(4) masker = util.RepeatedXorMasker(masking_nonce) return header + masking_nonce + masker.mask(body) def _filter_and_format_frame_object(frame, mask, frame_filters): for frame_filter in frame_filters: frame_filter.filter(frame) header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_binary_frame( message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]): """Creates a simple binary frame with no extension, reserved bit.""" frame = Frame(fin=fin, opcode=opcode, payload=message) return _filter_and_format_frame_object(frame, mask, frame_filters) def create_text_frame( message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]): """Creates a simple text frame with no extension, reserved bit.""" encoded_message = message.encode('utf-8') return create_binary_frame(encoded_message, opcode, fin, mask, frame_filters) def parse_frame(receive_bytes, logger=None, ws_version=common.VERSION_HYBI_LATEST, unmask_receive=True): """Parses a frame. Returns a tuple containing each header field and payload. Args: receive_bytes: a function that reads frame data from a stream or something similar. The function takes length of the bytes to be read. The function must raise ConnectionTerminatedException if there is not enough data to be read. logger: a logging object. 
ws_version: the version of WebSocket protocol. unmask_receive: unmask received frames. When received unmasked frame, raises InvalidFrameException. Raises: ConnectionTerminatedException: when receive_bytes raises it. InvalidFrameException: when the frame contains invalid data. """ if not logger: logger = logging.getLogger() logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame') received = receive_bytes(2) first_byte = ord(received[0]) fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0xf second_byte = ord(received[1]) mask = (second_byte >> 7) & 1 payload_length = second_byte & 0x7f logger.log(common.LOGLEVEL_FINE, 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, ' 'Mask=%s, Payload_length=%s', fin, rsv1, rsv2, rsv3, opcode, mask, payload_length) if (mask == 1) != unmask_receive: raise InvalidFrameException( 'Mask bit on the received frame did\'nt match masking ' 'configuration for received frames') # The HyBi and later specs disallow putting a value in 0x0-0xFFFF # into the 8-octet extended payload length field (or 0x0-0xFD in # 2-octet field). 
valid_length_encoding = True length_encoding_bytes = 1 if payload_length == 127: logger.log(common.LOGLEVEL_FINE, 'Receive 8-octet extended payload length') extended_payload_length = receive_bytes(8) payload_length = struct.unpack( '!Q', extended_payload_length)[0] if payload_length > 0x7FFFFFFFFFFFFFFF: raise InvalidFrameException( 'Extended payload length >= 2^63') if ws_version >= 13 and payload_length < 0x10000: valid_length_encoding = False length_encoding_bytes = 8 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) elif payload_length == 126: logger.log(common.LOGLEVEL_FINE, 'Receive 2-octet extended payload length') extended_payload_length = receive_bytes(2) payload_length = struct.unpack( '!H', extended_payload_length)[0] if ws_version >= 13 and payload_length < 126: valid_length_encoding = False length_encoding_bytes = 2 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) if not valid_length_encoding: logger.warning( 'Payload length is not encoded using the minimal number of ' 'bytes (%d is encoded using %d bytes)', payload_length, length_encoding_bytes) if mask == 1: logger.log(common.LOGLEVEL_FINE, 'Receive mask') masking_nonce = receive_bytes(4) masker = util.RepeatedXorMasker(masking_nonce) logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce) else: masker = _NOOP_MASKER logger.log(common.LOGLEVEL_FINE, 'Receive payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): receive_start = time.time() raw_payload_bytes = receive_bytes(payload_length) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done receiving payload data at %s MB/s', payload_length / (time.time() - receive_start) / 1000 / 1000) logger.log(common.LOGLEVEL_FINE, 'Unmask payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): unmask_start = time.time() bytes = masker.mask(raw_payload_bytes) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done unmasking payload 
data at %s MB/s', payload_length / (time.time() - unmask_start) / 1000 / 1000) return opcode, bytes, fin, rsv1, rsv2, rsv3 class FragmentedFrameBuilder(object): """A stateful class to send a message as fragments.""" def __init__(self, mask, frame_filters=[], encode_utf8=True): """Constructs an instance.""" self._mask = mask self._frame_filters = frame_filters # This is for skipping UTF-8 encoding when building text type frames # from compressed data. self._encode_utf8 = encode_utf8 self._started = False # Hold opcode of the first frame in messages to verify types of other # frames in the message are all the same. self._opcode = common.OPCODE_TEXT def build(self, message, end, binary): if binary: frame_type = common.OPCODE_BINARY else: frame_type = common.OPCODE_TEXT if self._started: if self._opcode != frame_type: raise ValueError('Message types are different in frames for ' 'the same message') opcode = common.OPCODE_CONTINUATION else: opcode = frame_type self._opcode = frame_type if end: self._started = False fin = 1 else: self._started = True fin = 0 if binary or not self._encode_utf8: return create_binary_frame( message, opcode, fin, self._mask, self._frame_filters) else: return create_text_frame( message, opcode, fin, self._mask, self._frame_filters) def _create_control_frame(opcode, body, mask, frame_filters): frame = Frame(opcode=opcode, payload=body) for frame_filter in frame_filters: frame_filter.filter(frame) if len(frame.payload) > 125: raise BadOperationException( 'Payload data size of control frames must be 125 bytes or less') header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_ping_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters) def create_pong_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters) def 
create_close_frame(body, mask=False, frame_filters=[]): return _create_control_frame( common.OPCODE_CLOSE, body, mask, frame_filters) def create_closing_handshake_body(code, reason): body = '' if code is not None: if (code > common.STATUS_USER_PRIVATE_MAX or code < common.STATUS_NORMAL_CLOSURE): raise BadOperationException('Status code is out of range') if (code == common.STATUS_NO_STATUS_RECEIVED or code == common.STATUS_ABNORMAL_CLOSURE or code == common.STATUS_TLS_HANDSHAKE): raise BadOperationException('Status code is reserved pseudo ' 'code') encoded_reason = reason.encode('utf-8') body = struct.pack('!H', code) + encoded_reason return body class StreamOptions(object): """Holds option values to configure Stream objects.""" def __init__(self): """Constructs StreamOptions.""" # Enables deflate-stream extension. self.deflate_stream = False # Filters applied to frames. self.outgoing_frame_filters = [] self.incoming_frame_filters = [] # Filters applied to messages. Control frames are not affected by them. self.outgoing_message_filters = [] self.incoming_message_filters = [] self.encode_text_message_to_utf8 = True self.mask_send = False self.unmask_receive = True # RFC6455 disallows fragmented control frames, but mux extension # relaxes the restriction. self.allow_fragmented_control_frame = False class Stream(StreamBase): """A class for parsing/building frames of the WebSocket protocol (RFC 6455). """ def __init__(self, request, options): """Constructs an instance. Args: request: mod_python request. """ StreamBase.__init__(self, request) self._logger = util.get_class_logger(self) self._options = options if self._options.deflate_stream: self._logger.debug('Setup filter for deflate-stream') self._request = util.DeflateRequest(self._request) self._request.client_terminated = False self._request.server_terminated = False # Holds body of received fragments. self._received_fragments = [] # Holds the opcode of the first fragment. 
self._original_opcode = None self._writer = FragmentedFrameBuilder( self._options.mask_send, self._options.outgoing_frame_filters, self._options.encode_text_message_to_utf8) self._ping_queue = deque() def _receive_frame(self): """Receives a frame and return data in the frame as a tuple containing each header field and payload separately. Raises: ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. """ def _receive_bytes(length): return self.receive_bytes(length) return parse_frame(receive_bytes=_receive_bytes, logger=self._logger, ws_version=self._request.ws_version, unmask_receive=self._options.unmask_receive) def _receive_frame_as_frame_object(self): opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame() return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3, opcode=opcode, payload=bytes) def send_message(self, message, end=True, binary=False): """Send message. Args: message: text in unicode or binary in str to send. binary: send message as binary frame. Raises: BadOperationException: when called on a server-terminated connection or called with inconsistent message type or binary parameter. """ if self._request.server_terminated: raise BadOperationException( 'Requested send_message after sending out a closing handshake') if binary and isinstance(message, unicode): raise BadOperationException( 'Message for binary frame must be instance of str') for message_filter in self._options.outgoing_message_filters: message = message_filter.filter(message, end, binary) try: self._write(self._writer.build(message, end, binary)) except ValueError, e: raise BadOperationException(e) def _get_message_from_frame(self, frame): """Gets a message from frame. If the message is composed of fragmented frames and the frame is not the last fragmented frame, this method returns None. The whole message will be returned when the last fragmented frame is passed to this method. 
Raises: InvalidFrameException: when the frame doesn't match defragmentation context, or the frame contains invalid data. """ if frame.opcode == common.OPCODE_CONTINUATION: if not self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received a termination frame but fragmentation ' 'not started') else: raise InvalidFrameException( 'Received an intermediate frame but ' 'fragmentation not started') if frame.fin: # End of fragmentation frame self._received_fragments.append(frame.payload) message = ''.join(self._received_fragments) self._received_fragments = [] return message else: # Intermediate frame self._received_fragments.append(frame.payload) return None else: if self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received an unfragmented frame without ' 'terminating existing fragmentation') else: raise InvalidFrameException( 'New fragmentation started without terminating ' 'existing fragmentation') if frame.fin: # Unfragmented frame self._original_opcode = frame.opcode return frame.payload else: # Start of fragmentation frame if (not self._options.allow_fragmented_control_frame and common.is_control_opcode(frame.opcode)): raise InvalidFrameException( 'Control frames must not be fragmented') self._original_opcode = frame.opcode self._received_fragments.append(frame.payload) return None def _process_close_message(self, message): """Processes close message. Args: message: close message. Raises: InvalidFrameException: when the message is invalid. """ self._request.client_terminated = True # Status code is optional. We can have status reason only if we # have status code. Status reason can be empty string. 
So, # allowed cases are # - no application data: no code no reason # - 2 octet of application data: has code but no reason # - 3 or more octet of application data: both code and reason if len(message) == 0: self._logger.debug('Received close frame (empty body)') self._request.ws_close_code = ( common.STATUS_NO_STATUS_RECEIVED) elif len(message) == 1: raise InvalidFrameException( 'If a close frame has status code, the length of ' 'status code must be 2 octet') elif len(message) >= 2: self._request.ws_close_code = struct.unpack( '!H', message[0:2])[0] self._request.ws_close_reason = message[2:].decode( 'utf-8', 'replace') self._logger.debug( 'Received close frame (code=%d, reason=%r)', self._request.ws_close_code, self._request.ws_close_reason) # Drain junk data after the close frame if necessary. self._drain_received_data() if self._request.server_terminated: self._logger.debug( 'Received ack for server-initiated closing handshake') return self._logger.debug( 'Received client-initiated closing handshake') code = common.STATUS_NORMAL_CLOSURE reason = '' if hasattr(self._request, '_dispatcher'): dispatcher = self._request._dispatcher code, reason = dispatcher.passive_closing_handshake( self._request) if code is None and reason is not None and len(reason) > 0: self._logger.warning( 'Handler specified reason despite code being None') reason = '' if reason is None: reason = '' self._send_closing_handshake(code, reason) self._logger.debug( 'Sent ack for client-initiated closing handshake ' '(code=%r, reason=%r)', code, reason) def _process_ping_message(self, message): """Processes ping message. Args: message: ping message. """ try: handler = self._request.on_ping_handler if handler: handler(self._request, message) return except AttributeError, e: pass self._send_pong(message) def _process_pong_message(self, message): """Processes pong message. Args: message: pong message. """ # TODO(tyoshino): Add ping timeout handling. 
inflight_pings = deque() while True: try: expected_body = self._ping_queue.popleft() if expected_body == message: # inflight_pings contains pings ignored by the # other peer. Just forget them. self._logger.debug( 'Ping %r is acked (%d pings were ignored)', expected_body, len(inflight_pings)) break else: inflight_pings.append(expected_body) except IndexError, e: # The received pong was unsolicited pong. Keep the # ping queue as is. self._ping_queue = inflight_pings self._logger.debug('Received a unsolicited pong') break try: handler = self._request.on_pong_handler if handler: handler(self._request, message) except AttributeError, e: pass def receive_message(self): """Receive a WebSocket frame and return its payload as a text in unicode or a binary in str. Returns: payload data of the frame - as unicode instance if received text frame - as str instance if received binary frame or None iff received closing handshake. Raises: BadOperationException: when called on a client-terminated connection. ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. UnsupportedFrameException: when the received frame has flags, opcode we cannot handle. You can ignore this exception and continue receiving the next frame. """ if self._request.client_terminated: raise BadOperationException( 'Requested receive_message after receiving a closing ' 'handshake') while True: # mp_conn.read will block if no bytes are available. # Timeout is controlled by TimeOut directive of Apache. frame = self._receive_frame_as_frame_object() # Check the constraint on the payload size for control frames # before extension processes the frame. 
# See also http://tools.ietf.org/html/rfc6455#section-5.5 if (common.is_control_opcode(frame.opcode) and len(frame.payload) > 125): raise InvalidFrameException( 'Payload data size of control frames must be 125 bytes or ' 'less') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) if frame.rsv1 or frame.rsv2 or frame.rsv3: raise UnsupportedFrameException( 'Unsupported flag is set (rsv = %d%d%d)' % (frame.rsv1, frame.rsv2, frame.rsv3)) message = self._get_message_from_frame(frame) if message is None: continue for message_filter in self._options.incoming_message_filters: message = message_filter.filter(message) if self._original_opcode == common.OPCODE_TEXT: # The WebSocket protocol section 4.4 specifies that invalid # characters must be replaced with U+fffd REPLACEMENT # CHARACTER. try: return message.decode('utf-8') except UnicodeDecodeError, e: raise InvalidUTF8Exception(e) elif self._original_opcode == common.OPCODE_BINARY: return message elif self._original_opcode == common.OPCODE_CLOSE: self._process_close_message(message) return None elif self._original_opcode == common.OPCODE_PING: self._process_ping_message(message) elif self._original_opcode == common.OPCODE_PONG: self._process_pong_message(message) else: raise UnsupportedFrameException( 'Opcode %d is not supported' % self._original_opcode) def _send_closing_handshake(self, code, reason): body = create_closing_handshake_body(code, reason) frame = create_close_frame( body, mask=self._options.mask_send, frame_filters=self._options.outgoing_frame_filters) self._request.server_terminated = True self._write(frame) def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''): """Closes a WebSocket connection. Args: code: Status code for close frame. If code is None, a close frame with empty body will be sent. reason: string representing close reason. 
Raises: BadOperationException: when reason is specified with code None or reason is not an instance of both str and unicode. """ if self._request.server_terminated: self._logger.debug( 'Requested close_connection but server is already terminated') return if code is None: if reason is not None and len(reason) > 0: raise BadOperationException( 'close reason must not be specified if code is None') reason = '' else: if not isinstance(reason, str) and not isinstance(reason, unicode): raise BadOperationException( 'close reason must be an instance of str or unicode') self._send_closing_handshake(code, reason) self._logger.debug( 'Sent server-initiated closing handshake (code=%r, reason=%r)', code, reason) if (code == common.STATUS_GOING_AWAY or code == common.STATUS_PROTOCOL_ERROR): # It doesn't make sense to wait for a close frame if the reason is # protocol error or that the server is going away. For some of # other reasons, it might not make sense to wait for a close frame, # but it's not clear, yet. return # TODO(ukai): 2. wait until the /client terminated/ flag has been set, # or until a server-defined timeout expires. # # For now, we expect receiving closing handshake right after sending # out closing handshake. message = self.receive_message() if message is not None: raise ConnectionTerminatedException( 'Didn\'t receive valid ack for closing handshake') # TODO: 3. close the WebSocket connection. # note: mod_python Connection (mp_conn) doesn't have close method. def send_ping(self, body=''): frame = create_ping_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) self._ping_queue.append(body) def _send_pong(self, body): frame = create_pong_frame( body, self._options.mask_send, self._options.outgoing_frame_filters) self._write(frame) def get_last_received_opcode(self): """Returns the opcode of the WebSocket message which the last received frame belongs to. The return value is valid iff immediately after receive_message call. 
""" return self._original_opcode def _drain_received_data(self): """Drains unread data in the receive buffer to avoid sending out TCP RST packet. This is because when deflate-stream is enabled, some DEFLATE block for flushing data may follow a close frame. If any data remains in the receive buffer of a socket when the socket is closed, it sends out TCP RST packet to the other peer. Since mod_python's mp_conn object doesn't support non-blocking read, we perform this only when pywebsocket is running in standalone mode. """ # If self._options.deflate_stream is true, self._request is # DeflateRequest, so we can get wrapped request object by # self._request._request. # # Only _StandaloneRequest has _drain_received_data method. if (self._options.deflate_stream and ('_drain_received_data' in dir(self._request._request))): self._request._request._drain_received_data() # vi:sts=4 sw=4 et
bsd-3-clause
alaski/nova
nova/servicegroup/drivers/base.py
71
1052
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class Driver(object): """Base class for all ServiceGroup drivers.""" def join(self, member, group, service=None): """Add a new member to a service group. :param member: the joined member ID/name :param group: the group ID/name, of the joined member :param service: a `nova.service.Service` object """ raise NotImplementedError() def is_up(self, member): """Check whether the given member is up.""" raise NotImplementedError()
apache-2.0
ilyes14/scikit-learn
sklearn/semi_supervised/tests/test_label_propagation.py
307
1974
""" test the label propagation module """ import nose import numpy as np from sklearn.semi_supervised import label_propagation from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal ESTIMATORS = [ (label_propagation.LabelPropagation, {'kernel': 'rbf'}), (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}), (label_propagation.LabelSpreading, {'kernel': 'rbf'}), (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2}) ] def test_fit_transduction(): samples = [[1., 0.], [0., 2.], [1., 3.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) nose.tools.assert_equal(clf.transduction_[2], 1) def test_distribution(): samples = [[1., 0.], [0., 1.], [1., 1.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) if parameters['kernel'] == 'knn': continue # unstable test; changes in k-NN ordering break it assert_array_almost_equal(clf.predict_proba([[1., 0.0]]), np.array([[1., 0.]]), 2) else: assert_array_almost_equal(np.asarray(clf.label_distributions_[2]), np.array([.5, .5]), 2) def test_predict(): samples = [[1., 0.], [0., 2.], [1., 3.]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1])) def test_predict_proba(): samples = [[1., 0.], [0., 1.], [1., 2.5]] labels = [0, 1, -1] for estimator, parameters in ESTIMATORS: clf = estimator(**parameters).fit(samples, labels) assert_array_almost_equal(clf.predict_proba([[1., 1.]]), np.array([[0.5, 0.5]]))
bsd-3-clause
AnthonyBroadCrawford/servo
tests/wpt/web-platform-tests/tools/six/six.py
426
27961
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2014 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.8.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. 
This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", 
"html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", 
__name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), 
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
# Attach each MovedAttribute descriptor as a lazily-resolved class attribute.
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")


class Module_six_moves_urllib_error(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_error"""


_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")


class Module_six_moves_urllib_request(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_request"""


_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")


class Module_six_moves_urllib_response(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")


class Module_six_moves_urllib_robotparser(_LazyModule):

    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")


class Module_six_moves_urllib(types.ModuleType):

    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")


def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))


# Names of the attributes that hold a method's function object and bound
# instance, and a function's closure/code/defaults/globals, differ between
# Python 2 and 3; resolve them once here for the attrgetters below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"


try:
    advance_iterator = next
except NameError:
    # Python < 2.6: no next() builtin; emulate it via the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed callable(); reimplement via the MRO.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    # Base class giving Python 2 classes a .next() that delegates to __next__,
    # so subclasses only need to define the Python 3 protocol method.
    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)


# Dictionary iteration/view helpers with a uniform spelling on both majors.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")


# Byte/text literal helpers and byte-sequence primitives.
if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")


# exec and exception re-raising: the syntax differs per major version, so the
# unsupported spelling is hidden inside exec'd strings to keep this file
# importable on both.
if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")


if sys.version_info > (3, 2):
    exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    def raise_from(value, from_value):
        raise value


print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")

# functools.wraps before 3.4 did not set __wrapped__; backfill it here.
if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps


def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})


def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper

# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable

# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer

# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
mpl-2.0
Hackplayers/Empire-mod-Hpys-tests
lib/modules/powershell/situational_awareness/network/get_sql_instance_domain.py
2
4763
from lib.common import helpers class Module: def __init__(self, mainMenu, params=[]): self.info = { 'Name' : 'Get-SQLInstanceDomain', 'Author': ['@_nullbind', '@0xbadjuju'], 'Description': ('Returns a list of SQL Server instances discovered by querying ' 'a domain controller for systems with registered MSSQL service ' 'principal names. The function will default to the current user\'s ' 'domain and logon server, but an alternative domain controller ' 'can be provided. UDP scanning of management servers is optional.'), 'Background' : True, 'OutputExtension' : None, 'NeedsAdmin' : False, 'OpsecSafe' : True, 'Language' : 'powershell', 'MinPSVersion' : '2', 'MinLanguageVersion' : '2', 'Comments': [ 'https://github.com/NetSPI/PowerUpSQL/blob/master/PowerUpSQL.ps1' ] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent' : { 'Description' : 'Agent to run module on.', 'Required' : True, 'Value' : '' }, 'DomainController' : { 'Description' : "Domain controller for Domain and Site that you want to query against.", 'Required' : False, 'Value' : '' }, 'ComputerName' : { 'Description' : 'Computer name to filter for.', 'Required' : False, 'Value' : '' }, 'DomainServiceAccount' : { 'Description' : 'Domain account to filter for.', 'Required' : False, 'Value' : '' }, 'CheckMgmt' : { 'Description' : 'Performs UDP scan of servers managing SQL Server clusters.', 'Required' : False, 'Value' : 'False' }, 'UDPTimeOut' : { 'Description' : 'Timeout in seconds for UDP scans of management servers. 
Longer timeout = more accurate.', 'Required' : False, 'Value' : '3' }, 'Username' : { 'Description' : 'SQL Server or domain account to authenticate with.', 'Required' : False, 'Value' : '' }, 'Password' : { 'Description' : 'SQL Server or domain account password to authenticate with.', 'Required' : False, 'Value' : '' } } self.mainMenu = mainMenu for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self): domainController = self.options['DomainController']['Value'] computerName = self.options['ComputerName']['Value'] domainAccount = self.options['DomainServiceAccount']['Value'] checkMgmt = self.options['CheckMgmt']['Value'] udpTimeOut = self.options['UDPTimeOut']['Value'] username = self.options['Username']['Value'] password = self.options['Password']['Value'] # read in the common module source code moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Get-SQLInstanceDomain.ps1" script = "" try: with open(moduleSource, 'r') as source: script = source.read() except: print helpers.color("[!] Could not read module source path at: " + str(moduleSource)) return "" script += " Get-SQLInstanceDomain" if username != "": script += " -Username " + username if password != "": script += " -Password " + password if domainController != "": script += " -DomainController "+domainController if computerName != "": script += " -ComputerName "+computerName if domainAccount != "": script += " -DomainAccount "+domainAccount if checkMgmt.lower() != "false": script += " -CheckMgmt" if udpTimeOut != "": script += " -UDPTimeOut "+udpTimeOut return script
bsd-3-clause
kawasaki2013/python-for-android-x86
python3-alpha/python3-src/Lib/test/time_hashlib.py
167
2887
# It's intended that this script be run by hand.  It runs speed tests on
# hashlib functions; it does not test for correctness.

import sys, time
import hashlib


def creatorFunc():
    """Placeholder hash-object factory; rebound below based on sys.argv."""
    raise RuntimeError("eek, creatorFunc not overridden")


def test_scaled_msg(scale, name):
    """Time repeatedly digesting a message of *scale* bytes.

    The iteration count is scaled inversely with the message size so each
    call does roughly the same total amount of hashing work.
    """
    # BUG FIX: use floor division.  On Python 3, "106201/scale" is true
    # division and yields a float, so range(iterations) raised TypeError.
    iterations = 106201 // scale * 20
    longStr = 'Z' * scale

    localCF = creatorFunc  # hoist the global lookup out of the timed loop
    start = time.time()
    for f in range(iterations):
        x = localCF(longStr).digest()
    end = time.time()

    print(('%2.2f' % (end - start)), "seconds", iterations, "x", len(longStr), "bytes", name)


def test_create():
    """Time construction of 20000 hash objects (no data hashed)."""
    start = time.time()
    for f in range(20000):
        d = creatorFunc()
    end = time.time()

    print(('%2.2f' % (end - start)), "seconds", '[20000 creations]')


def test_zero():
    """Time 20000 digests of the empty message."""
    start = time.time()
    for f in range(20000):
        x = creatorFunc().digest()
    end = time.time()

    print(('%2.2f' % (end - start)), "seconds", '[20000 "" digests]')

hName = sys.argv[1]

#
# setup our creatorFunc to test the requested hash
#
if hName in ('_md5', '_sha'):
    exec('import ' + hName)
    exec('creatorFunc = ' + hName + '.new')
    print("testing speed of old", hName, "legacy interface")
elif hName == '_hashlib' and len(sys.argv) > 3:
    # cheat, it is the same as new() which is faster
    import _hashlib
    exec('creatorFunc = _hashlib.%s' % sys.argv[2])
    print("testing speed of _hashlib.%s" % sys.argv[2], getattr(_hashlib, sys.argv[2]))
elif hName == '_hashlib' and len(sys.argv) == 3:
    import _hashlib
    exec('creatorFunc = lambda x=_hashlib.new : x(%r)' % sys.argv[2])
    print("testing speed of _hashlib.new(%r)" % sys.argv[2])
elif hasattr(hashlib, hName) and hasattr(getattr(hashlib, hName), '__call__'):
    creatorFunc = getattr(hashlib, hName)
    print("testing speed of hashlib." + hName, getattr(hashlib, hName))
else:
    exec("creatorFunc = lambda x=hashlib.new : x(%r)" % hName)
    print("testing speed of hashlib.new(%r)" % hName)

try:
    test_create()
except ValueError:
    print()
    print("pass argument(s) naming the hash to run a speed test on:")
    print(" '_md5' and '_sha' test the legacy builtin md5 and sha")
    print(" '_hashlib' 'openssl_hName' 'fast' tests the builtin _hashlib")
    print(" '_hashlib' 'hName' tests builtin _hashlib.new(shaFOO)")
    print(" 'hName' tests the hashlib.hName() implementation if it exists")
    print(" otherwise it uses hashlib.new(hName).")
    print()
    raise

test_zero()
test_scaled_msg(scale=106201, name='[huge data]')
test_scaled_msg(scale=10620, name='[large data]')
test_scaled_msg(scale=1062, name='[medium data]')
test_scaled_msg(scale=424, name='[4*small data]')
test_scaled_msg(scale=336, name='[3*small data]')
test_scaled_msg(scale=212, name='[2*small data]')
test_scaled_msg(scale=106, name='[small data]')
test_scaled_msg(scale=creatorFunc().digest_size, name='[digest_size data]')
test_scaled_msg(scale=10, name='[tiny data]')
apache-2.0
Shine-/xbmc
lib/libUPnP/Neptune/Build/Tools/SCons/gcc-generic.py
199
1249
import os def generate(env, gcc_cross_prefix=None, gcc_strict=True, gcc_stop_on_warning=None): if gcc_stop_on_warning == None: gcc_stop_on_warning = env['stop_on_warning'] ### compiler flags if gcc_strict: env.AppendUnique(CCFLAGS = ['-pedantic', '-Wall', '-W', '-Wundef', '-Wno-long-long']) env.AppendUnique(CFLAGS = ['-Wmissing-prototypes', '-Wmissing-declarations']) else: env.AppendUnique(CCFLAGS = ['-Wall']) compiler_defines = ['-D_REENTRANT'] env.AppendUnique(CCFLAGS = compiler_defines) env.AppendUnique(CPPFLAGS = compiler_defines) if env['build_config'] == 'Debug': env.AppendUnique(CCFLAGS = '-g') else: env.AppendUnique(CCFLAGS = '-O3') if gcc_stop_on_warning: env.AppendUnique(CCFLAGS = ['-Werror']) if gcc_cross_prefix: env['ENV']['PATH'] += os.environ['PATH'] env['AR'] = gcc_cross_prefix+'-ar' env['RANLIB'] = gcc_cross_prefix+'-ranlib' env['CC'] = gcc_cross_prefix+'-gcc' env['CXX'] = gcc_cross_prefix+'-g++' env['LINK'] = gcc_cross_prefix+'-g++' if gcc_cross_prefix: env['ENV']['PATH'] = os.environ['PATH'] + ':' + env['ENV']['PATH']
gpl-2.0
vaygr/ansible
lib/ansible/modules/network/illumos/flowadm.py
23
14817
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Adam Števko <adam.stevko@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: flowadm short_description: Manage bandwidth resource control and priority for protocols, services and zones on Solaris/illumos systems description: - Create/modify/remove networking bandwidth and associated resources for a type of traffic on a particular link. version_added: "2.2" author: Adam Števko (@xen0l) options: name: description: > - A flow is defined as a set of attributes based on Layer 3 and Layer 4 headers, which can be used to identify a protocol, service, or a zone. required: true aliases: [ 'flow' ] link: description: - Specifiies a link to configure flow on. required: false local_ip: description: - Identifies a network flow by the local IP address. required: false remote_ip: description: - Identifies a network flow by the remote IP address. required: false transport: description: > - Specifies a Layer 4 protocol to be used. It is typically used in combination with I(local_port) to identify the service that needs special attention. required: false local_port: description: - Identifies a service specified by the local port. required: false dsfield: description: > - Identifies the 8-bit differentiated services field (as defined in RFC 2474). The optional dsfield_mask is used to state the bits of interest in the differentiated services field when comparing with the dsfield value. Both values must be in hexadecimal. required: false maxbw: description: > - Sets the full duplex bandwidth for the flow. The bandwidth is specified as an integer with one of the scale suffixes(K, M, or G for Kbps, Mbps, and Gbps). 
If no units are specified, the input value will be read as Mbps. required: false priority: description: - Sets the relative priority for the flow. required: false default: 'medium' choices: [ 'low', 'medium', 'high' ] temporary: description: - Specifies that the configured flow is temporary. Temporary flows do not persist across reboots. required: false default: false choices: [ "true", "false" ] state: description: - Create/delete/enable/disable an IP address on the network interface. required: false default: present choices: [ 'absent', 'present', 'resetted' ] ''' EXAMPLES = ''' # Limit SSH traffic to 100M via vnic0 interface - flowadm: link: vnic0 flow: ssh_out transport: tcp local_port: 22 maxbw: 100M state: present # Reset flow properties - flowadm: name: dns state: resetted # Configure policy for EF PHB (DSCP value of 101110 from RFC 2598) with a bandwidth of 500 Mbps and a high priority. - flowadm: link: bge0 dsfield: '0x2e:0xfc' maxbw: 500M priority: high flow: efphb-flow state: present ''' RETURN = ''' name: description: flow name returned: always type: string sample: "http_drop" link: description: flow's link returned: if link is defined type: string sample: "vnic0" state: description: state of the target returned: always type: string sample: "present" temporary: description: flow's persistence returned: always type: boolean sample: "True" priority: description: flow's priority returned: if priority is defined type: string sample: "low" transport: description: flow's transport returned: if transport is defined type: string sample: "tcp" maxbw: description: flow's maximum bandwidth returned: if maxbw is defined type: string sample: "100M" local_Ip: description: flow's local IP address returned: if local_ip is defined type: string sample: "10.0.0.42" local_port: description: flow's local port returned: if local_port is defined type: int sample: 1337 remote_Ip: description: flow's remote IP address returned: if remote_ip is defined type: string sample: 
"10.0.0.42" dsfield: description: flow's differentiated services value returned: if dsfield is defined type: string sample: "0x2e:0xfc" ''' import socket from ansible.module_utils.basic import AnsibleModule SUPPORTED_TRANSPORTS = ['tcp', 'udp', 'sctp', 'icmp', 'icmpv6'] SUPPORTED_PRIORITIES = ['low', 'medium', 'high'] SUPPORTED_ATTRIBUTES = ['local_ip', 'remote_ip', 'transport', 'local_port', 'dsfield'] SUPPORTPED_PROPERTIES = ['maxbw', 'priority'] class Flow(object): def __init__(self, module): self.module = module self.name = module.params['name'] self.link = module.params['link'] self.local_ip = module.params['local_ip'] self.remote_ip = module.params['remote_ip'] self.transport = module.params['transport'] self.local_port = module.params['local_port'] self.dsfield = module.params['dsfield'] self.maxbw = module.params['maxbw'] self.priority = module.params['priority'] self.temporary = module.params['temporary'] self.state = module.params['state'] self._needs_updating = { 'maxbw': False, 'priority': False, } @classmethod def is_valid_port(cls, port): return 1 <= int(port) <= 65535 @classmethod def is_valid_address(cls, ip): if ip.count('/') == 1: ip_address, netmask = ip.split('/') else: ip_address = ip if len(ip_address.split('.')) == 4: try: socket.inet_pton(socket.AF_INET, ip_address) except socket.error: return False if not 0 <= netmask <= 32: return False else: try: socket.inet_pton(socket.AF_INET6, ip_address) except socket.error: return False if not 0 <= netmask <= 128: return False return True @classmethod def is_hex(cls, number): try: int(number, 16) except ValueError: return False return True @classmethod def is_valid_dsfield(cls, dsfield): dsmask = None if dsfield.count(':') == 1: dsval = dsfield.split(':')[0] else: dsval, dsmask = dsfield.split(':') if dsmask and not 0x01 <= int(dsmask, 16) <= 0xff and not 0x01 <= int(dsval, 16) <= 0xff: return False elif not 0x01 <= int(dsval, 16) <= 0xff: return False return True def flow_exists(self): cmd = 
[self.module.get_bin_path('flowadm')] cmd.append('show-flow') cmd.append(self.name) (rc, _, _) = self.module.run_command(cmd) if rc == 0: return True else: return False def delete_flow(self): cmd = [self.module.get_bin_path('flowadm')] cmd.append('remove-flow') if self.temporary: cmd.append('-t') cmd.append(self.name) return self.module.run_command(cmd) def create_flow(self): cmd = [self.module.get_bin_path('flowadm')] cmd.append('add-flow') cmd.append('-l') cmd.append(self.link) if self.local_ip: cmd.append('-a') cmd.append('local_ip=' + self.local_ip) if self.remote_ip: cmd.append('-a') cmd.append('remote_ip=' + self.remote_ip) if self.transport: cmd.append('-a') cmd.append('transport=' + self.transport) if self.local_port: cmd.append('-a') cmd.append('local_port=' + self.local_port) if self.dsfield: cmd.append('-a') cmd.append('dsfield=' + self.dsfield) if self.maxbw: cmd.append('-p') cmd.append('maxbw=' + self.maxbw) if self.priority: cmd.append('-p') cmd.append('priority=' + self.priority) if self.temporary: cmd.append('-t') cmd.append(self.name) return self.module.run_command(cmd) def _query_flow_props(self): cmd = [self.module.get_bin_path('flowadm')] cmd.append('show-flowprop') cmd.append('-c') cmd.append('-o') cmd.append('property,possible') cmd.append(self.name) return self.module.run_command(cmd) def flow_needs_udpating(self): (rc, out, err) = self._query_flow_props() NEEDS_UPDATING = False if rc == 0: properties = (line.split(':') for line in out.rstrip().split('\n')) for prop, value in properties: if prop == 'maxbw' and self.maxbw != value: self._needs_updating.update({prop: True}) NEEDS_UPDATING = True elif prop == 'priority' and self.priority != value: self._needs_updating.update({prop: True}) NEEDS_UPDATING = True return NEEDS_UPDATING else: self.module.fail_json(msg='Error while checking flow properties: %s' % err, stderr=err, rc=rc) def update_flow(self): cmd = [self.module.get_bin_path('flowadm')] cmd.append('set-flowprop') if self.maxbw and 
self._needs_updating['maxbw']: cmd.append('-p') cmd.append('maxbw=' + self.maxbw) if self.priority and self._needs_updating['priority']: cmd.append('-p') cmd.append('priority=' + self.priority) if self.temporary: cmd.append('-t') cmd.append(self.name) return self.module.run_command(cmd) def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['flow']), link=dict(required=False), local_ip=dict(required=False), remote_ip=dict(required=False), transport=dict(required=False, choices=SUPPORTED_TRANSPORTS), local_port=dict(required=False), dsfield=dict(required=False), maxbw=dict(required=False), priority=dict(required=False, default='medium', choices=SUPPORTED_PRIORITIES), temporary=dict(default=False, type='bool'), state=dict(required=False, default='present', choices=['absent', 'present', 'resetted']), ), mutually_exclusive=[ ('local_ip', 'remote_ip'), ('local_ip', 'transport'), ('local_ip', 'local_port'), ('local_ip', 'dsfield'), ('remote_ip', 'transport'), ('remote_ip', 'local_port'), ('remote_ip', 'dsfield'), ('transport', 'dsfield'), ('local_port', 'dsfield'), ], supports_check_mode=True ) flow = Flow(module) rc = None out = '' err = '' result = {} result['name'] = flow.name result['state'] = flow.state result['temporary'] = flow.temporary if flow.link: result['link'] = flow.link if flow.maxbw: result['maxbw'] = flow.maxbw if flow.priority: result['priority'] = flow.priority if flow.local_ip: if flow.is_valid_address(flow.local_ip): result['local_ip'] = flow.local_ip if flow.remote_ip: if flow.is_valid_address(flow.remote_ip): result['remote_ip'] = flow.remote_ip if flow.transport: result['transport'] = flow.transport if flow.local_port: if flow.is_valid_port(flow.local_port): result['local_port'] = flow.local_port else: module.fail_json(msg='Invalid port: %s' % flow.local_port, rc=1) if flow.dsfield: if flow.is_valid_dsfield(flow.dsfield): result['dsfield'] = flow.dsfield else: module.fail_json(msg='Invalid dsfield: %s' % 
flow.dsfield, rc=1) if flow.state == 'absent': if flow.flow_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = flow.delete_flow() if rc != 0: module.fail_json(msg='Error while deleting flow: "%s"' % err, name=flow.name, stderr=err, rc=rc) elif flow.state == 'present': if not flow.flow_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = flow.create_flow() if rc != 0: module.fail_json(msg='Error while creating flow: "%s"' % err, name=flow.name, stderr=err, rc=rc) else: if flow.flow_needs_udpating(): (rc, out, err) = flow.update_flow() if rc != 0: module.fail_json(msg='Error while updating flow: "%s"' % err, name=flow.name, stderr=err, rc=rc) elif flow.state == 'resetted': if flow.flow_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = flow.reset_flow() if rc != 0: module.fail_json(msg='Error while resetting flow: "%s"' % err, name=flow.name, stderr=err, rc=rc) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
tkuhrt/fabric
bddtests/steps/orderer_util.py
1
10865
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time
import datetime
import Queue
from orderer import ab_pb2, ab_pb2_grpc
from common import common_pb2

import bdd_test_util
import bootstrap_util
import bdd_grpc_util

from grpc.beta import implementations
from grpc.framework.interfaces.face.face import AbortionError
from grpc.beta.interfaces import StatusCode

# The default chain ID when the system is statically bootstrapped for testing
TEST_CHAIN_ID = "testchainid"


def _defaultDataFunction(index):
    """Build a default Envelope carrying a timestamped BDD test payload."""
    payload = common_pb2.Payload(
        header=common_pb2.Header(
            chainHeader=common_pb2.ChainHeader(
                chainID=TEST_CHAIN_ID,
                type=common_pb2.ENDORSER_TRANSACTION,
            ),
            signatureHeader=common_pb2.SignatureHeader(),
        ),
        data=str("BDD test: {0}".format(datetime.datetime.utcnow())),
    )
    envelope = common_pb2.Envelope(
        payload=payload.SerializeToString()
    )
    return envelope


class StreamHelper:
    """Pairs an outgoing send queue with an incoming gRPC reply stream."""

    def __init__(self):
        self.streamClosed = False
        self.sendQueue = Queue.Queue()
        self.receivedMessages = []
        self.replyGenerator = None

    def setReplyGenerator(self, replyGenerator):
        # Fixed: identity comparison ("is None") instead of "== None".
        assert self.replyGenerator is None, "reply generator already set!!"
        self.replyGenerator = replyGenerator

    def createSendGenerator(self, timeout=2):
        """Yield queued messages; a queued None (or timeout) ends the stream."""
        while True:
            try:
                nextMsg = self.sendQueue.get(True, timeout)
                if nextMsg:
                    yield nextMsg
                else:
                    # None indicates desire to close send
                    return
            except Queue.Empty:
                return

    def readMessage(self):
        """Read exactly one message from the reply stream."""
        for reply in self.readMessages(1):
            return reply
        assert False, "Received no messages"

    def readMessages(self, expectedCount):
        """Read up to *expectedCount* messages, tolerating a clean EOF."""
        msgsReceived = []
        counter = 0
        try:
            for reply in self.replyGenerator:
                counter += 1
                # print("received reply: {0}, counter = {1}".format(reply, counter))
                msgsReceived.append(reply)
                if counter == int(expectedCount):
                    break
        except AbortionError as networkError:
            self.handleNetworkError(networkError)
        return msgsReceived

    def handleNetworkError(self, networkError):
        """Treat an EOF from the peer as a clean close; re-raise anything else."""
        if networkError.code == StatusCode.OUT_OF_RANGE and networkError.details == "EOF":
            print("Error received and ignored: {0}".format(networkError))
            print()
            self.streamClosed = True
        else:
            raise Exception("Unexpected NetworkError: {0}".format(networkError))


class DeliverStreamHelper(StreamHelper):
    """StreamHelper bound to an orderer Deliver stream for one entity."""

    def __init__(self, ordererStub, entity, directory, nodeAdminTuple, timeout=600):
        StreamHelper.__init__(self)
        self.nodeAdminTuple = nodeAdminTuple
        self.directory = directory
        self.entity = entity
        # Set the UpdateMessage and start the stream
        sendGenerator = self.createSendGenerator(timeout)
        self.replyGenerator = ordererStub.Deliver(sendGenerator, timeout + 1)

    def createSeekInfo(self, chainID, start='Oldest', end='Newest', behavior='FAIL_IF_NOT_READY'):
        """Build a SeekInfo for the given block range."""
        seekInfo = ab_pb2.SeekInfo(
            start=seekPosition(start),
            stop=seekPosition(end),
            behavior=ab_pb2.SeekInfo.SeekBehavior.Value(behavior),
        )
        return seekInfo

    def seekToRange(self, chainID=TEST_CHAIN_ID, start='Oldest', end='Newest'):
        """Enqueue a signed seek request for the given block range."""
        seekInfo = self.createSeekInfo(start=start, end=end, chainID=chainID)
        envelope = bootstrap_util.createEnvelopeForMsg(directory=self.directory,
                                                       chainId=chainID,
                                                       msg=seekInfo,
                                                       typeAsString="DELIVER_SEEK_INFO",
                                                       nodeAdminTuple=self.nodeAdminTuple)
        self.sendQueue.put(envelope)

    def getBlocks(self):
        """Drain block replies from the stream until a status message or error."""
        blocks = []
        try:
            while True:
                reply = self.readMessage()
                if reply.HasField("block"):
                    blocks.append(reply.block)
                    # print("received reply: {0}, len(blocks) = {1}".format(reply, len(blocks)))
                else:
                    if reply.status != common_pb2.SUCCESS:
                        print("Got error: {0}".format(reply.status))
                    # print("Done receiving blocks")
                    break
        except Exception as e:
            print("getBlocks got error: {0}".format(e))
        return blocks


class UserRegistration:
    """Per-user state: broadcast stubs and deliver streams keyed by service."""

    def __init__(self, userName, directory):
        self.userName = userName
        self.directory = directory
        self.tags = {}
        # Dictionary of composeService->atomic broadcast grpc Stub
        self.atomicBroadcastStubsDict = {}
        # composeService->StreamHelper
        self.abDeliversStreamHelperDict = {}

    def getUserName(self):
        return self.userName

    def closeStreams(self):
        """Signal every open deliver stream to close its send side."""
        for compose_service, deliverStreamHelper in self.abDeliversStreamHelperDict.iteritems():
            deliverStreamHelper.sendQueue.put(None)

    def connectToDeliverFunction(self, context, composeService, nodeAdminTuple, timeout=1):
        'Connect to the deliver function and drain messages to associated orderer queue'
        assert not composeService in self.abDeliversStreamHelperDict, "Already connected to deliver stream on {0}".format(composeService)
        streamHelper = DeliverStreamHelper(directory=self.directory,
                                           ordererStub=self.getABStubForComposeService(context=context,
                                                                                       composeService=composeService),
                                           entity=self, nodeAdminTuple=nodeAdminTuple)
        self.abDeliversStreamHelperDict[composeService] = streamHelper
        return streamHelper

    def getDelivererStreamHelper(self, context, composeService):
        assert composeService in self.abDeliversStreamHelperDict, "NOT connected to deliver stream on {0}".format(composeService)
        return self.abDeliversStreamHelperDict[composeService]

    def broadcastMessages(self, context, numMsgsToBroadcast, composeService, chainID=TEST_CHAIN_ID, dataFunc=_defaultDataFunction):
        """Broadcast *numMsgsToBroadcast* envelopes and await one ack each."""
        abStub = self.getABStubForComposeService(context, composeService)
        replyGenerator = abStub.Broadcast(generateBroadcastMessages(chainID=chainID, numToGenerate=int(numMsgsToBroadcast), dataFunc=dataFunc), 2)
        counter = 0
        try:
            for reply in replyGenerator:
                counter += 1
                print("{0} received reply: {1}, counter = {2}".format(self.getUserName(), reply, counter))
                if counter == int(numMsgsToBroadcast):
                    break
        except Exception as e:
            print("Got error: {0}".format(e))
            print("Got error")
        print("Done")
        assert counter == int(numMsgsToBroadcast), "counter = {0}, expected {1}".format(counter, numMsgsToBroadcast)

    def getABStubForComposeService(self, context, composeService):
        'Return a Stub for the supplied composeService, will cache'
        if composeService in self.atomicBroadcastStubsDict:
            return self.atomicBroadcastStubsDict[composeService]
        # Get the IP address of the server that the user registered on
        root_certificates = self.directory.getTrustedRootsForOrdererNetworkAsPEM()
        ipAddress, port = bdd_test_util.getPortHostMapping(context.compose_containers, composeService, 7050)
        # print("ipAddress in getABStubForComposeService == {0}:{1}".format(ipAddress, port))
        channel = bdd_grpc_util.getGRPCChannel(ipAddress=ipAddress, port=port, root_certificates=root_certificates, ssl_target_name_override=composeService)
        newABStub = ab_pb2_grpc.AtomicBroadcastStub(channel)
        self.atomicBroadcastStubsDict[composeService] = newABStub
        return newABStub


# Registerses a user on a specific composeService
def registerUser(context, secretMsg, composeService, directory=None):
    """Register an orderer user in the behave context.

    BUG FIX: UserRegistration was constructed as UserRegistration(secretMsg),
    which both passed the secret dict where a user name is expected and
    omitted the required *directory* argument (TypeError).  A backward-
    compatible *directory* parameter (default None) is now threaded through.
    """
    userName = secretMsg['enrollId']
    if 'ordererUsers' not in context:
        context.ordererUsers = {}
    if userName in context.ordererUsers:
        raise Exception("Orderer user already registered: {0}".format(userName))
    userRegistration = UserRegistration(userName, directory)
    context.ordererUsers[userName] = userRegistration
    return userRegistration


def getUserRegistration(context, enrollId):
    """Look up a previously registered orderer user by enrollment id."""
    userRegistration = None
    # BUG FIX: the fallback branch referenced an undefined name
    # 'ordererContext' (NameError); it now initializes context.ordererUsers.
    if 'ordererUsers' not in context:
        context.ordererUsers = {}
    if enrollId in context.ordererUsers:
        userRegistration = context.ordererUsers[enrollId]
    else:
        raise Exception("Orderer user has not been registered: {0}".format(enrollId))
    return userRegistration


def seekPosition(position):
    """Map 'Oldest'/'Newest'/block-number to an ab_pb2.SeekPosition."""
    if position == 'Oldest':
        return ab_pb2.SeekPosition(oldest=ab_pb2.SeekOldest())
    elif position == 'Newest':
        return ab_pb2.SeekPosition(newest=ab_pb2.SeekNewest())
    else:
        return ab_pb2.SeekPosition(specified=ab_pb2.SeekSpecified(number=position))


def convertSeek(utfString):
    """Return an int when the string is numeric, otherwise a plain str."""
    try:
        return int(utfString)
    except ValueError:
        return str(utfString)


def createSeekInfo(chainID=TEST_CHAIN_ID, start='Oldest', end='Newest', behavior='FAIL_IF_NOT_READY'):
    """Build an (unsigned) Envelope wrapping a SeekInfo for *chainID*."""
    return common_pb2.Envelope(
        payload=common_pb2.Payload(
            header=common_pb2.Header(
                channel_header=common_pb2.ChannelHeader(channel_id=chainID).SerializeToString(),
                signature_header=common_pb2.SignatureHeader().SerializeToString(),
            ),
            data=ab_pb2.SeekInfo(
                start=seekPosition(start),
                stop=seekPosition(end),
                behavior=ab_pb2.SeekInfo.SeekBehavior.Value(behavior),
            ).SerializeToString(),
        ).SerializeToString(),
    )


def generateBroadcastMessages(chainID=TEST_CHAIN_ID, numToGenerate=3, timeToHoldOpen=1, dataFunc=_defaultDataFunction):
    """Yield *numToGenerate* envelopes, then hold the stream open briefly."""
    messages = []
    for i in range(0, numToGenerate):
        messages.append(dataFunc(i))
    for msg in messages:
        yield msg
    time.sleep(timeToHoldOpen)
context.ordererUsers[enrollId] else: raise Exception("Orderer user has not been registered: {0}".format(enrollId)) return userRegistration def seekPosition(position): if position == 'Oldest': return ab_pb2.SeekPosition(oldest = ab_pb2.SeekOldest()) elif position == 'Newest': return ab_pb2.SeekPosition(newest = ab_pb2.SeekNewest()) else: return ab_pb2.SeekPosition(specified = ab_pb2.SeekSpecified(number = position)) def convertSeek(utfString): try: return int(utfString) except ValueError: return str(utfString) def createSeekInfo(chainID = TEST_CHAIN_ID, start = 'Oldest', end = 'Newest', behavior = 'FAIL_IF_NOT_READY'): return common_pb2.Envelope( payload = common_pb2.Payload( header = common_pb2.Header( channel_header = common_pb2.ChannelHeader( channel_id = chainID ).SerializeToString(), signature_header = common_pb2.SignatureHeader().SerializeToString(), ), data = ab_pb2.SeekInfo( start = seekPosition(start), stop = seekPosition(end), behavior = ab_pb2.SeekInfo.SeekBehavior.Value(behavior), ).SerializeToString(), ).SerializeToString(), ) def generateBroadcastMessages(chainID = TEST_CHAIN_ID, numToGenerate = 3, timeToHoldOpen = 1, dataFunc =_defaultDataFunction): messages = [] for i in range(0, numToGenerate): messages.append(dataFunc(i)) for msg in messages: yield msg time.sleep(timeToHoldOpen)
apache-2.0